// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include "dr_types.h"

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per-vport cached FW FT for checksum recalculation; this
	 * recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

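/* Look up the cached checksum-recalculation FW flow table for @vport_num,
 * creating and caching it on first use, and return its RX ICM address.
 */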
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

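/* Allocate the domain's HW resources in order: PD, UAR page, STE and
 * modify-action ICM pools, and finally the send ring. On failure,
 * everything allocated so far is released in reverse order.
 */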
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (!dmn->uar) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = -ENOMEM;
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

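/* Fill the vport capability entry for @vport_number with its RX/TX ICM
 * addresses (from the eswitch vport context) and its GVMI.
 */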
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 bool other_vport,
				 u16 vport_number)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = &dmn->info.caps.vports_caps[vport_number];

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn, !!vport, vport);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = WIRE_PORT;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

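/* FDB domains require eswitch-manager privileges: query the eswitch caps,
 * record the SW-owner flag and drop ICM addresses, then build the per-vport
 * capability table (the last entry is reserved for the wire port).
 */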
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to query vports caps\n");
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

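/* Query device and eswitch capabilities and derive the per-domain-type
 * settings: STE types and the default/drop ICM addresses used for
 * NIC RX, NIC TX and FDB domains.
 */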
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!dmn->info.caps.rx_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!dmn->info.caps.tx_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!dmn->info.caps.fdb_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
		if (!vport_cap) {
			mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
			return -ENOENT;
		}

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_dbg(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

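/* Create a SW-steering domain of the given type over @mdev.
 * Returns NULL on any failure.
 *
 * Illustrative caller sketch (hypothetical, error handling elided):
 *
 *	struct mlx5dr_domain *dmn;
 *
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		return -ENOMEM;
 *	... insert steering rules ...
 *	mlx5dr_domain_sync(dmn, MLX5DR_DOMAIN_SYNC_FLAGS_SW |
 *			   MLX5DR_DOMAIN_SYNC_FLAGS_HW);
 *	...
 *	mlx5dr_domain_destroy(dmn);
 */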
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_dbg(dmn, "Failed to init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to init domain resources\n");
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
		goto uninit_resources;
	}

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure the device steering tables are synchronized with updates made by
 * SW insertion: MLX5DR_DOMAIN_SYNC_FLAGS_SW drains the send ring, and
 * MLX5DR_DOMAIN_SYNC_FLAGS_HW issues a steering sync command to the device.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mutex_lock(&dmn->mutex);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mutex_unlock(&dmn->mutex);
		if (ret)
			return ret;
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->mutex);
	kfree(dmn);
	return 0;
}

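/* Pair @dmn with @peer_dmn (or unpair when @peer_dmn is NULL), moving the
 * peer reference from the old peer domain, if any, to the new one.
 */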
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mutex_lock(&dmn->mutex);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mutex_unlock(&dmn->mutex);
}