/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/memtype.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct	work;
	union {
		struct mlx5_ib_dev	      *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool			is_slave;
	unsigned int		event;
	void			*param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel modules memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   u8 *port_num)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		if (!port->rep)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			*port_num = i + 1;
			return &port->roce;
		}
		read_unlock(&port->roce.netdev_lock);
	}

	return NULL;
}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		if (ibdev->is_rep)
			break;
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, ib device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if (ibdev->is_rep)
			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
		if (!roce)
			return NOTIFY_DONE;
		if ((upper == ndev || (!upper && ndev == roce->netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}
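
/*
 * Note (editor's summary of the two functions above): get/put form a
 * pair. For a slave port the get takes a reference (mpi->mdev_refcnt++),
 * so every successful get must be balanced by a put; the master's mdev
 * is deliberately not refcounted because it lives as long as the ib_dev
 * itself.
 */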

static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					   u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}
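
/*
 * Example, derived from the tables above: a 100GAUI_2/100GBASE-CR2 link
 * is reported as IB_SPEED_HDR (~50 Gb/s per lane) times IB_WIDTH_2X, so
 * the 100 Gb/s Ethernet aggregate is expressed as speed x width exactly
 * as it would be for a native IB link.
 */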

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 * Use native port in case of reps
	 */
	if (dev->is_rep)
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   1);
	else
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   mdev_port_num);
	if (err)
		goto out;
	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (dev->lag_active) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];
	int ret;

	if (gid) {
		gid_type = attr->gid_type;
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac,
				      vlan_id < VLAN_CFI_MASK, vlan_id,
				      port_num);
}
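
/*
 * Note: set_roce_addr() maps the GID type onto the wire format programmed
 * into the HCA: IB_GID_TYPE_IB selects RoCE v1, while ROCE_UDP_ENCAP
 * selects RoCE v2 and additionally classifies the GID as IPv4 (a
 * v4-mapped address) or IPv6 so the device builds the matching L3 header.
 */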

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8 bytes standard atomic operations and capable
	 * of host endianness respond
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}
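
/*
 * In short, IB_ATOMIC_HCA is advertised only when all three conditions
 * hold: the device implements both compare-and-swap and fetch-and-add,
 * supports the 8-byte operand size on QPs, and can respond in host
 * endianness; otherwise atomics are reported as IB_ATOMIC_NONE.
 */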

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8	desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw_outlen && uhw_outlen < resp_len)
		return -EINVAL;

	resp.response_length = resp_len;

	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
			resp.rss_caps.rx_hash_function =
						MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
						MLX5_RX_HASH_SRC_IPV4 |
						MLX5_RX_HASH_DST_IPV4 |
						MLX5_RX_HASH_SRC_IPV6 |
						MLX5_RX_HASH_DST_IPV6 |
						MLX5_RX_HASH_SRC_PORT_TCP |
						MLX5_RX_HASH_DST_PORT_TCP |
						MLX5_RX_HASH_SRC_PORT_UDP |
						MLX5_RX_HASH_DST_PORT_UDP |
						MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw_outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw_outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id	   = mdev->pdev->device;
	props->hw_ver		   = mdev->pdev->revision;

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = ~(min_page_size - 1);
	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	props->max_pi_fast_reg_page_list_len =
		props->max_fast_reg_page_list_len / 2;
	props->max_sgl_rd =
		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
		if (!uhw) {
			/* ODP for kernel QPs is not implemented for receive
			 * WQEs and SRQ WQEs
			 */
			props->odp_caps.per_transport_caps.rc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.uc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.ud_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.xrc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
		}
	}

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (mlx5_core_is_vf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(mdev, tag_matching) &&
	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
						MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
						MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw_outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw_outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
	}

	if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			else
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw_outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				   u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_IB_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		*ib_width = IB_WIDTH_4X;
	}

	return;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}
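
/*
 * The values returned above are the enum ib_mtu encodings:
 * IB_MTU_256 = 1 ... IB_MTU_4096 = 5, so e.g. a 2048-byte port MTU is
 * reported to the IB core as IB_MTU_2048 (4).
 */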

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;

	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		struct mlx5_ib_dev *dev = to_mdev(ibdev);
		struct mlx5_core_dev *mdev;
		bool put_mdev = true;

		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
		if (!mdev) {
			/* If the port isn't affiliated yet query the master.
			 * The master and slave will have the same values.
			 */
			mdev = dev->mdev;
			port = 1;
			put_mdev = false;
		}
		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
{
	int ret;

	/* Only link layer == ethernet is valid for representors
	 * and we always use port 1
	 */
	ret = mlx5_query_port_roce(ibdev, port, props);
	if (ret || !props)
		return ret;

	/* We don't support GIDS */
	props->gid_tbl_len = 0;

	return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
				   u16 index, u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
	if (!mdev) {
		/* The port isn't affiliated yet, get the PKey from the master
		 * port. For RoCE the PKey tables will be the same.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
					index, pkey);
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port);

	return err;
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;

	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
	if (err)
		goto out;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		err = -EINVAL;
		goto out;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
						 0, &ctx);

out:
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	/* CM layer calls ib_modify_port() regardless of the link layer. For
	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
	 */
	if (!is_ib)
		return 0;

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

static u16 calc_dynamic_bfregs(int uars_per_sys_page)
{
	/* Large page with non 4k uar support might limit the dynamic size */
	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
		return MLX5_MIN_DYN_BFREGS;

	return MLX5_MAX_DYN_BFREGS;
}

static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     struct mlx5_bfreg_info *bfregi)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	/* This holds the required static allocation asked by the user */
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, bfregi->total_num_bfregs,
		    bfregi->num_sys_pages);

	return 0;
}
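
/*
 * Worked summary of calc_total_bfregs(): the user's requested bfreg count
 * is first rounded up to a whole number of system pages
 * (bfregs_per_sys_page = UARs per page * non-fast-path bfregs per UAR),
 * then a dynamic region of num_dyn_bfregs is appended, so
 * num_sys_pages = (static + dynamic bfregs) / bfregs_per_sys_page.
 */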
1658
1659static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1660{
1661 struct mlx5_bfreg_info *bfregi;
1662 int err;
1663 int i;
1664
1665 bfregi = &context->bfregi;
31a78a5a 1666 for (i = 0; i < bfregi->num_static_sys_pages; i++) {
b037c29a
EC
1667 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1668 if (err)
1669 goto error;
1670
1671 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1672 }
4ed131d0
YH
1673
1674 for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1675 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1676
b037c29a
EC
1677 return 0;
1678
1679error:
1680 for (--i; i >= 0; i--)
1681 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1682 mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1683
1684 return err;
1685}

static void deallocate_uars(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++)
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
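
/*
 * Self-loopback accounting: local loopback is turned on only once it is
 * actually needed, i.e. when a second user transport domain appears
 * (user_td reaches 2) or any loopback-requiring QP exists (qps reaches 1),
 * and turned back off when the counts drop below those thresholds again.
 */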

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	int err = 0;

	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td++;
	if (qp)
		dev->lb.qps++;

	if (dev->lb.user_td == 2 ||
	    dev->lb.qps == 1) {
		if (!dev->lb.enabled) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
			dev->lb.enabled = true;
		}
	}

	mutex_unlock(&dev->lb.mutex);

	return err;
}

void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td--;
	if (qp)
		dev->lb.qps--;

	if (dev->lb.user_td == 1 &&
	    dev->lb.qps == 0) {
		if (dev->lb.enabled) {
			mlx5_nic_vport_update_local_lb(dev->mdev, false);
			dev->lb.enabled = false;
		}
	}

	mutex_unlock(&dev->lb.mutex);
}

static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
					  u16 uid)
{
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return 0;

	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return err;

	return mlx5_ib_enable_lb(dev, true, false);
}

static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
					     u16 uid)
{
	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return;

	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return;

	mlx5_ib_disable_lb(dev, true, false);
}
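
/*
 * mlx5_ib_alloc_ucontext() negotiates the uverbs ABI with userspace: it
 * validates the v0/v2 request, sizes the bfreg/UAR tables, optionally
 * creates a DEVX uid and a transport domain, and reports each optional
 * response field only when resp.response_length shows the caller's buffer
 * is large enough to hold it.
 */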

static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	u32 dump_fill_mkey;
	bool lib_uar_4k;

	if (!dev->ib_active)
		return -EAGAIN;

	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return -EINVAL;

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
		return -EOPNOTSUPP;

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return -EINVAL;

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (dev->wc_support)
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
	}

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
		if (err < 0)
			goto out_uars;
		context->devx_uid = err;
	}

	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
					     context->devx_uid);
	if (err)
		goto out_devx;

	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
		if (err)
			goto out_mdev;
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = dev->num_ports;

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
			resp.eth_min_inline++;
		}
		resp.response_length += sizeof(resp.eth_min_inline);
	}

	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
		if (mdev->clock_info)
			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
		resp.response_length += sizeof(resp.clock_info_versions);
	}

	/*
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
	 * pretend we don't support reading the HCA's core clock. This is also
	 * forced by mmap function.
	 */
	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
		if (PAGE_SIZE <= 4096) {
			resp.comp_mask |=
				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset =
				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
		}
		resp.response_length += sizeof(resp.hca_core_clock_offset);
	}

	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
		resp.response_length += sizeof(resp.log_uar_size);

	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
		resp.response_length += sizeof(resp.num_uars_per_page);

	if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
		resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
		resp.response_length += sizeof(resp.num_dyn_bfregs);
	}

	if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
		if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
			resp.dump_fill_mkey = dump_fill_mkey;
			resp.comp_mask |=
				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
		}
		resp.response_length += sizeof(resp.dump_fill_mkey);
	}

	err = ib_copy_to_udata(udata, &resp, resp.response_length);
	if (err)
		goto out_mdev;

	bfregi->ver = ver;
	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
	context->cqe_version = resp.cqe_version;
	context->lib_caps = req.lib_caps;
	print_lib_caps(dev, context->lib_caps);

	if (dev->lag_active) {
		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;

		atomic_set(&context->tx_port_affinity,
			   atomic_add_return(
				   1, &dev->port[port].roce.tx_port_affinity));
	}

	return 0;

out_mdev:
	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
out_devx:
	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

out_uars:
	deallocate_uars(dev, context);

out_sys_pages:
	kfree(bfregi->sys_pages);

out_count:
	kfree(bfregi->count);

out_ctx:
	return err;
}

static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_bfreg_info *bfregi;

	bfregi = &context->bfregi;
	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);

	if (context->devx_uid)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
				 int uar_idx)
{
	int fw_uars_per_page;

	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;

	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
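
/*
 * The mmap offset (vma->vm_pgoff) multiplexes several mapping types: the
 * command lives above MLX5_IB_MMAP_CMD_SHIFT and the argument (e.g. a UAR
 * index) in the bits below it, as decoded by the helpers that follow.
 */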

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

/* Index resides in an extra byte to enable larger values than 255 */
static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}
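
/*
 * Worked example (assuming MLX5_IB_MMAP_CMD_SHIFT == 8 and an 8-bit command
 * mask, which the bit layout above implies): vm_pgoff 0x012345 decodes to
 * command 0x23 via get_command() and extended index 0x0145 here, the extra
 * index byte coming from bits 16..23.
 */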

static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	case MLX5_IB_MMAP_DEVICE_MEM:
		return "Device Memory";
	default:
		return NULL;
	}
}

static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
					struct vm_area_struct *vma,
					struct mlx5_ib_ucontext *context)
{
	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
	    !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
		return -EOPNOTSUPP;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;

	if (!dev->mdev->clock_info)
		return -EOPNOTSUPP;

	return vm_insert_page(vma, vma->vm_start,
			      virt_to_page(dev->mdev->clock_info));
}

static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
{
	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
	struct mlx5_var_table *var_table = &dev->var_table;
	struct mlx5_ib_dm *mdm;

	switch (mentry->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_MEMIC:
		mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
		mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
				       mdm->size);
		kfree(mdm);
		break;
	case MLX5_IB_MMAP_TYPE_VAR:
		mutex_lock(&var_table->bitmap_lock);
		clear_bit(mentry->page_idx, var_table->bitmap);
		mutex_unlock(&var_table->bitmap_lock);
		kfree(mentry);
		break;
	default:
		WARN_ON(true);
	}
}

static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;
	unsigned long idx;
	phys_addr_t pfn;
	pgprot_t prot;
	u32 bfreg_dyn_idx = 0;
	u32 uar_index;
	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
				bfregi->num_static_sys_pages;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (dyn_uar)
		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
	else
		idx = get_index(vma->vm_pgoff);

	if (idx >= max_valid_idx) {
		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
			     idx, max_valid_idx);
		return -EINVAL;
	}

	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_ALLOC_WC:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
		if (!pat_enabled())
			return -EPERM;
#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
		return -EPERM;
#endif
	/* fall through */
	case MLX5_IB_MMAP_REGULAR_PAGE:
		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
		prot = pgprot_writecombine(vma->vm_page_prot);
		break;
	case MLX5_IB_MMAP_NC_PAGE:
		prot = pgprot_noncached(vma->vm_page_prot);
		break;
	default:
		return -EINVAL;
	}

	if (dyn_uar) {
		int uars_per_page;

		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
				     bfreg_dyn_idx, bfregi->total_num_bfregs);
			return -EINVAL;
		}

		mutex_lock(&bfregi->lock);
		/* Fail if uar already allocated, first bfreg index of each
		 * page holds its count.
		 */
		if (bfregi->count[bfreg_dyn_idx]) {
			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
			mutex_unlock(&bfregi->lock);
			return -EINVAL;
		}

		bfregi->count[bfreg_dyn_idx]++;
		mutex_unlock(&bfregi->lock);

		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
		if (err) {
			mlx5_ib_warn(dev, "UAR alloc failed\n");
			goto free_bfreg;
		}
	} else {
		uar_index = bfregi->sys_pages[idx];
	}

	pfn = uar_index2pfn(dev, uar_index);
	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
				prot, NULL);
	if (err) {
		mlx5_ib_err(dev,
			    "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
			    err, mmap_cmd2str(cmd));
		goto err;
	}

	if (dyn_uar)
		bfregi->sys_pages[idx] = uar_index;
	return 0;

err:
	if (!dyn_uar)
		return err;

	mlx5_cmd_free_uar(dev->mdev, idx);

free_bfreg:
	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);

	return err;
}

static int add_dm_mmap_entry(struct ib_ucontext *context,
			     struct mlx5_ib_dm *mdm,
			     u64 address)
{
	mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
	mdm->mentry.address = address;
	return rdma_user_mmap_entry_insert_range(
			context, &mdm->mentry.rdma_entry,
			mdm->size,
			MLX5_IB_MMAP_DEVICE_MEM << 16,
			(MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
}

static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
	unsigned long idx;
	u8 command;

	command = get_command(vma->vm_pgoff);
	idx = get_extended_index(vma->vm_pgoff);

	return (command << 16 | idx);
}

static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
			       struct vm_area_struct *vma,
			       struct ib_ucontext *ucontext)
{
	struct mlx5_user_mmap_entry *mentry;
	struct rdma_user_mmap_entry *entry;
	unsigned long pgoff;
	pgprot_t prot;
	phys_addr_t pfn;
	int ret;

	pgoff = mlx5_vma_to_pgoff(vma);
	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
	if (!entry)
		return -EINVAL;

	mentry = to_mmmap(entry);
	pfn = (mentry->address >> PAGE_SHIFT);
	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR)
		prot = pgprot_noncached(vma->vm_page_prot);
	else
		prot = pgprot_writecombine(vma->vm_page_prot);
	ret = rdma_user_mmap_io(ucontext, vma, pfn,
				entry->npages * PAGE_SIZE,
				prot,
				entry);
	rdma_user_mmap_entry_put(&mentry->rdma_entry);
	return ret;
}

static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
{
	u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
	u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;

	return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
		(index & 0xFF)) << PAGE_SHIFT;
}
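
/*
 * mlx5_entry_to_mmap_offset() is the byte-offset inverse of the decoding
 * above: it splices the rdma_entry's 16-bit command and 16-bit index back
 * into the command/index/extended-index bit layout that get_command() and
 * get_extended_index() take apart, shifted up by PAGE_SHIFT.
 */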

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
	case MLX5_IB_MMAP_ALLOC_WC:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;
		vma->vm_flags &= ~VM_MAYWRITE;

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);
	case MLX5_IB_MMAP_CLOCK_INFO:
		return mlx5_ib_mmap_clock_info_page(dev, vma, context);

	default:
		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
	}

	return 0;
}

static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
					u32 type)
{
	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
			return -EOPNOTSUPP;
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (!capable(CAP_SYS_RAWIO) ||
		    !capable(CAP_NET_RAW))
			return -EPERM;

		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
			return -EOPNOTSUPP;
		break;
	}

	return 0;
}
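
/*
 * MEMIC device memory: the handler below reserves device memory through
 * mlx5_cmd_alloc_memic(), publishes it to userspace via an rdma mmap entry,
 * and copies back the page index plus the sub-page start offset so the
 * caller can mmap and address the allocation.
 */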

static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
				 struct mlx5_ib_dm *dm,
				 struct ib_dm_alloc_attr *attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
	u64 start_offset;
	u16 page_idx;
	int err;
	u64 address;

	dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);

	err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
				   dm->size, attr->alignment);
	if (err)
		return err;

	address = dm->dev_addr & PAGE_MASK;
	err = add_dm_mmap_entry(ctx, dm, address);
	if (err)
		goto err_dealloc;

	page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
	err = uverbs_copy_to(attrs,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			     &page_idx,
			     sizeof(page_idx));
	if (err)
		goto err_copy;

	start_offset = dm->dev_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &start_offset, sizeof(start_offset));
	if (err)
		goto err_copy;

	return 0;

err_copy:
	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
err_dealloc:
	mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);

	return err;
}

static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
				  struct mlx5_ib_dm *dm,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs,
				  int type)
{
	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
	u64 act_size;
	int err;

	/* Allocation size must be a multiple of the basic block size
	 * and a power of 2.
	 */
	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	act_size = roundup_pow_of_two(act_size);

	dm->size = act_size;
	err = mlx5_dm_sw_icm_alloc(dev, type, act_size,
				   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
				   &dm->icm_dm.obj_id);
	if (err)
		return err;

	err = uverbs_copy_to(attrs,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &dm->dev_addr, sizeof(dm->dev_addr));
	if (err)
		mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
				       to_mucontext(ctx)->devx_uid, dm->dev_addr,
				       dm->icm_dm.obj_id);

	return err;
}

struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *dm;
	enum mlx5_ib_uapi_dm_type type;
	int err;

	err = uverbs_get_const_default(&type, attrs,
				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
	if (err)
		return ERR_PTR(err);

	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
		    type, attr->length, attr->alignment);

	err = check_dm_type_support(to_mdev(ibdev), type);
	if (err)
		return ERR_PTR(err);

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	dm->type = type;

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		err = handle_alloc_dm_memic(context, dm,
					    attr,
					    attrs);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		err = handle_alloc_dm_sw_icm(context, dm,
					     attr, attrs,
					     MLX5_SW_ICM_TYPE_STEERING);
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		err = handle_alloc_dm_sw_icm(context, dm,
					     attr, attrs,
					     MLX5_SW_ICM_TYPE_HEADER_MODIFY);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_free;

	return &dm->ibdm;

err_free:
	kfree(dm);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
	struct mlx5_ib_dm *dm = to_mdm(ibdm);
	int ret;

	switch (dm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
		return 0;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
					     dm->size, ctx->devx_uid, dm->dev_addr,
					     dm->icm_dm.obj_id);
		if (ret)
			return ret;
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
					     dm->size, ctx->devx_uid, dm->dev_addr,
					     dm->icm_dm.obj_id);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}

	kfree(dm);

	return 0;
}

static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx5_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct mlx5_ib_alloc_pd_resp resp;
	int err;
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	u16 uid = 0;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	uid = context ? context->devx_uid : 0;
	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);
	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
			    out, sizeof(out));
	if (err)
		return err;

	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
	pd->uid = uid;
	if (udata) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
			return -EFAULT;
		}
	}

	return 0;
}

static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
}

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT,
	MATCH_CRITERIA_ENABLE_MISC2_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)				     \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MATCH_CRITERIA_ENABLE_MISC2_BIT;

	return match_criteria_enable;
}

static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	u8 entry_mask;
	u8 entry_val;
	int err = 0;

	if (!mask)
		goto out;

	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
			      ip_protocol);
	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
			     ip_protocol);
	if (!entry_mask) {
		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
		goto out;
	}
	/* Don't override existing ip protocol */
	if (mask != entry_mask || val != entry_val)
		err = -EINVAL;
out:
	return err;
}

static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc,
			 misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
{
	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
		return -EOPNOTSUPP;

	return 0;
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
#define LAST_COUNTERS_FIELD counters

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
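
/*
 * Example: FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD) is
 * non-zero iff any byte after the 'tos' member of the mask is set, i.e.
 * userspace asked to match on a field newer than this driver understands.
 */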

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action)
{
	switch (maction->ib_action.type) {
	case IB_FLOW_ACTION_ESP:
		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
			return -EINVAL;
		/* Currently only AES_GCM keymat is supported by the driver */
		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
		action->action |= is_egress ?
			MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
			MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
		return 0;
	case IB_FLOW_ACTION_UNSPECIFIED:
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			action->modify_hdr =
				maction->flow_action_raw.modify_hdr;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_DECAP) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
			if (action->action &
			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
				return -EINVAL;
			action->action |=
				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			action->pkt_reformat =
				maction->flow_action_raw.pkt_reformat;
			return 0;
		}
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}
}

static int parse_flow_attr(struct mlx5_core_dev *mdev,
			   struct mlx5_flow_spec *spec,
			   const union ib_flow_spec *ib_spec,
			   const struct ib_flow_attr *flow_attr,
			   struct mlx5_flow_act *action, u32 prev_type)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	u32 *match_c = spec->match_criteria;
	u32 *match_v = spec->match_value;
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
					    misc_parameters_2);
	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
					    misc_parameters_2);
	void *headers_c;
	void *headers_v;
	int match_ipv;
	int ret;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -EOPNOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV4_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IP);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv4.mask.proto,
			      ib_spec->ipv4.val.proto))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV6_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IPV6);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv6.mask.next_hdr,
			      ib_spec->ipv6.val.next_hdr))
			return -EINVAL;

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);
		break;
	case IB_FLOW_SPEC_ESP:
		if (ib_spec->esp.mask.seq)
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
			 ntohl(ib_spec->esp.mask.spi));
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
			 ntohl(ib_spec->esp.val.spi));
		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_GRE:
		if (ib_spec->gre.mask.c_ks_res0_ver)
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_GRE);

		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
			 ntohs(ib_spec->gre.mask.protocol));
		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
			 ntohs(ib_spec->gre.val.protocol));

		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.mask.key,
		       sizeof(ib_spec->gre.mask.key));
		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.val.key,
		       sizeof(ib_spec->gre.val.key));
		break;
	case IB_FLOW_SPEC_MPLS:
		switch (prev_type) {
		case IB_FLOW_SPEC_UDP:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_udp),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		case IB_FLOW_SPEC_GRE:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_gre),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		default:
			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.inner_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    inner_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    inner_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			} else {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.outer_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    outer_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    outer_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			}
		}
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
					 LAST_DROP_FIELD))
			return -EOPNOTSUPP;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
		if (ret)
			return ret;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
					 LAST_COUNTERS_FIELD))
			return -EOPNOTSUPP;

		/* for now support only one counters spec per flow */
		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
			return -EINVAL;

		action->counters = ib_spec->flow_count.counters;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->num_of_specs < 1)
		return false;

	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
		struct ib_flow_spec_ipv4 *ipv4_spec;

		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
			return true;

		return false;
	}

	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
		struct ib_flow_spec_eth *eth_spec;

		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
		       is_multicast_ether_addr(eth_spec->val.dst_mac);
	}

	return false;
}

enum valid_spec {
	VALID_SPEC_INVALID,
	VALID_SPEC_VALID,
	VALID_SPEC_NA,
};

static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
		     const struct mlx5_flow_spec *spec,
		     const struct mlx5_flow_act *flow_act,
		     bool egress)
{
	const u32 *match_c = spec->match_criteria;
	bool is_crypto =
		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;

	/*
	 * Currently only crypto is supported in egress; when regular egress
	 * rules become supported, keep returning VALID_SPEC_NA for
	 * non-crypto specs.
	 */
	if (!is_crypto)
		return VALID_SPEC_NA;

	return is_crypto && is_ipsec &&
		(!egress || (!is_drop &&
			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
		VALID_SPEC_VALID : VALID_SPEC_INVALID;
}

static bool is_valid_spec(struct mlx5_core_dev *mdev,
			  const struct mlx5_flow_spec *spec,
			  const struct mlx5_flow_act *flow_act,
			  bool egress)
{
	/* We currently only support IPsec egress flows */
	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
	bool ipv4_spec_valid, ipv6_spec_valid;
	unsigned int ip_spec_type = 0;
	bool has_ethertype = false;
	unsigned int spec_index;
	bool mask_valid = true;
	u16 eth_type = 0;
	bool type_valid;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
		    ib_spec->eth.mask.ether_type) {
			mask_valid = (ib_spec->eth.mask.ether_type ==
				      htons(0xffff));
			has_ethertype = true;
			eth_type = ntohs(ib_spec->eth.val.ether_type);
		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
			ip_spec_type = ib_spec->type;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}

	type_valid = (!has_ethertype) || (!ip_spec_type);
	if (!type_valid && mask_valid) {
		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));

		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
			     (((eth_type == ETH_P_MPLS_UC) ||
			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
	}

	return type_valid;
}

static bool is_valid_attr(struct mlx5_core_dev *mdev,
			  const struct ib_flow_attr *flow_attr)
{
	return is_valid_ethertype(mdev, flow_attr, false) &&
	       is_valid_ethertype(mdev, flow_attr, true);
}

static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static void counters_clear_description(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mutex_lock(&mcounters->mcntrs_mutex);
	kfree(mcounters->counters_data);
	mcounters->counters_data = NULL;
	mcounters->cntrs_max_index = 0;
	mutex_unlock(&mcounters->mcntrs_mutex);
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;
	struct mlx5_ib_dev *dev = handler->dev;

	mutex_lock(&dev->flow_db->lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	if (handler->ibcounters &&
	    atomic_read(&handler->ibcounters->usecnt) == 1)
		counters_clear_description(handler->ibcounters);

	mutex_unlock(&dev->flow_db->lock);
	if (handler->flow_matcher)
		atomic_dec(&handler->flow_matcher->usecnt);
	kfree(handler);

	return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}
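
/*
 * I.e. user priority N maps to core priority 2N + 1, or 2N when dont_trap
 * is set, so every user priority owns a pair of adjacent core priorities
 * with the dont_trap slot first.
 */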

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};
3304
00b7c2ab
MG
3305#define MLX5_FS_MAX_TYPES 6
3306#define MLX5_FS_MAX_ENTRIES BIT(16)
d4be3f44
YH
3307
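/*
 * Create an auto-grouped flow table for a priority slot that does not
 * have one yet and hang it off the priority with a zeroed refcount.
 */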
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
					   struct mlx5_ib_flow_prio *prio,
					   int priority,
					   int num_entries, int num_groups,
					   u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.prio = priority;
	ft_attr.max_fte = num_entries;
	ft_attr.flags = flags;
	ft_attr.autogroup.max_num_groups = num_groups;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	prio->flow_table = ft;
	prio->refcount = 0;
	return prio;
}

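/*
 * Pick the flow namespace, priority slot, table size and tunnel flags
 * that match the attribute type (normal, leftovers or sniffer) and the
 * RX/TX direction, then return the existing table or create a new one.
 */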
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int max_table_size;
	int num_entries;
	int num_groups;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						       log_max_ft_size));
	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		enum mlx5_flow_namespace_type fn_type;

		if (flow_is_multicast_only(flow_attr) &&
		    !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		if (ft_type == MLX5_IB_FT_RX) {
			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
			prio = &dev->flow_db->prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						      reformat_l3_tunnel_to_l2))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		} else {
			max_table_size =
				BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
							      log_max_ft_size));
			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
			prio = &dev->flow_db->egress_prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		}
		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority,
					 &num_entries,
					 &num_groups);
		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-ENOTSUPP);

		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db->sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	max_table_size = min_t(int, num_entries, max_table_size);

	ft = prio->flow_table;
	if (!ft)
		return _get_prio(ns, prio, priority, max_table_size, num_groups,
				 flags);

	return prio;
}

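/*
 * For flows on an underlay QP, match on the BTH destination QP number
 * when the device supports it, so the rule only sees that QP's traffic.
 */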
static void set_underlay_qp(struct mlx5_ib_dev *dev,
			    struct mlx5_flow_spec *spec,
			    u32 underlay_qpn)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
					   spec->match_criteria,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					   misc_parameters);

	if (underlay_qpn &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
				      ft_field_support.bth_dst_qp)) {
		MLX5_SET(fte_match_set_misc,
			 misc_params_v, bth_dst_qp, underlay_qpn);
		MLX5_SET(fte_match_set_misc,
			 misc_params_c, bth_dst_qp, 0xffffff);
	}
}

static int read_flow_counters(struct ib_device *ibdev,
			      struct mlx5_read_counters_attr *read_attr)
{
	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	return mlx5_fc_query(dev->mdev, fc,
			     &read_attr->out[IB_COUNTER_PACKETS],
			     &read_attr->out[IB_COUNTER_BYTES]);
}

/* flow counters currently expose two counters: packets and bytes */
#define FLOW_COUNTERS_NUM 2
static int counters_set_description(struct ib_counters *counters,
				    enum mlx5_ib_counters_type counters_type,
				    struct mlx5_ib_flow_counters_desc *desc_data,
				    u32 ncounters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	u32 cntrs_max_index = 0;
	int i;

	if (counters_type != MLX5_IB_COUNTERS_FLOW)
		return -EINVAL;

	/* init the fields for the object */
	mcounters->type = counters_type;
	mcounters->read_counters = read_flow_counters;
	mcounters->counters_num = FLOW_COUNTERS_NUM;
	mcounters->ncounters = ncounters;
	/* each counter entry has both a description and an index pair */
	for (i = 0; i < ncounters; i++) {
		if (desc_data[i].description > IB_COUNTER_BYTES)
			return -EINVAL;

		if (cntrs_max_index <= desc_data[i].index)
			cntrs_max_index = desc_data[i].index + 1;
	}

	mutex_lock(&mcounters->mcntrs_mutex);
	mcounters->counters_data = desc_data;
	mcounters->cntrs_max_index = cntrs_max_index;
	mutex_unlock(&mcounters->mcntrs_mutex);

	return 0;
}

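/*
 * Bind user-supplied counter descriptions to an ib_counters object and
 * lazily allocate the underlying HW flow counter on first use; on
 * failure, undo whatever this call created.
 */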
#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
static int flow_counters_set_data(struct ib_counters *ibcounters,
				  struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
	bool hw_hndl = false;
	int ret = 0;

	if (ucmd && ucmd->ncounters_data != 0) {
		cntrs_data = ucmd->data;
		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
			return -EINVAL;

		desc_data = kcalloc(cntrs_data->ncounters,
				    sizeof(*desc_data),
				    GFP_KERNEL);
		if (!desc_data)
			return -ENOMEM;

		if (copy_from_user(desc_data,
				   u64_to_user_ptr(cntrs_data->counters_data),
				   sizeof(*desc_data) * cntrs_data->ncounters)) {
			ret = -EFAULT;
			goto free;
		}
	}

	if (!mcounters->hw_cntrs_hndl) {
		mcounters->hw_cntrs_hndl = mlx5_fc_create(
			to_mdev(ibcounters->device)->mdev, false);
		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
			goto free;
		}
		hw_hndl = true;
	}

	if (desc_data) {
		/* counters already bound to at least one flow */
		if (mcounters->cntrs_max_index) {
			ret = -EINVAL;
			goto free_hndl;
		}

		ret = counters_set_description(ibcounters,
					       MLX5_IB_COUNTERS_FLOW,
					       desc_data,
					       cntrs_data->ncounters);
		if (ret)
			goto free_hndl;

	} else if (!mcounters->cntrs_max_index) {
		/* counters not bound yet, must have udata passed */
		ret = -EINVAL;
		goto free_hndl;
	}

	return 0;

free_hndl:
	if (hw_hndl) {
		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
				mcounters->hw_cntrs_hndl);
		mcounters->hw_cntrs_hndl = NULL;
	}
free:
	kfree(desc_data);
	return ret;
}

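/*
 * Restrict a representor's rule to traffic from its own vport: match on
 * the metadata register when vport match metadata is enabled, otherwise
 * on the source port field directly.
 */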
static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
					 struct mlx5_flow_spec *spec,
					 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);

		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw,
								   rep->vport));
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);

		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);

		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	}
}

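/*
 * Core rule-creation path: parse the IB flow specs into an mlx5 flow
 * spec, apply underlay-QP and representor source-port matching, attach
 * counters, derive the forward/drop/allow action, and program the rule
 * into the priority's flow table.
 */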
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
						      struct mlx5_ib_flow_prio *ft_prio,
						      const struct ib_flow_attr *flow_attr,
						      struct mlx5_flow_destination *dst,
						      u32 underlay_qpn,
						      struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_destination dest_arr[2] = {};
	struct mlx5_flow_destination *rule_dst = dest_arr;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 prev_type = 0;
	int err = 0;
	int dest_num = 0;
	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;

	if (!is_valid_attr(dev->mdev, flow_attr))
		return ERR_PTR(-EINVAL);

	if (dev->is_rep && is_egress)
		return ERR_PTR(-EINVAL);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(dev->mdev, spec,
				      ib_flow, flow_attr, &flow_act,
				      prev_type);
		if (err < 0)
			goto free;

		prev_type = ((union ib_flow_spec *)ib_flow)->type;
		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
		memcpy(&dest_arr[0], dst, sizeof(*dst));
		dest_num++;
	}

	if (!flow_is_multicast_only(flow_attr))
		set_underlay_qp(dev, spec, underlay_qpn);

	if (dev->is_rep) {
		struct mlx5_eswitch_rep *rep;

		rep = dev->port[flow_attr->port - 1].rep;
		if (!rep) {
			err = -EINVAL;
			goto free;
		}

		mlx5_ib_set_rule_source_port(dev, spec, rep);
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);

	if (is_egress &&
	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
		err = -EINVAL;
		goto free;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		struct mlx5_ib_mcounters *mcounters;

		err = flow_counters_set_data(flow_act.counters, ucmd);
		if (err)
			goto free;

		mcounters = to_mcounters(flow_act.counters);
		handler->ibcounters = flow_act.counters;
		dest_arr[dest_num].type =
			MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest_arr[dest_num].counter_id =
			mlx5_fc_id(mcounters->hw_cntrs_hndl);
		dest_num++;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		if (!dest_num)
			rule_dst = NULL;
	} else {
		if (is_egress)
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		else
			flow_act.action |=
				dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
					MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
	}

	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
			     spec->flow_context.flow_tag, flow_attr->type);
		err = -EINVAL;
		goto free;
	}
	handler->rule = mlx5_add_flow_rules(ft, spec,
					    &flow_act,
					    rule_dst, dest_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;
	handler->dev = dev;

	ft_prio->flow_table = ft;
free:
	if (err && handler) {
		if (handler->ibcounters &&
		    atomic_read(&handler->ibcounters->usecnt) == 1)
			counters_clear_description(handler->ibcounters);
		kfree(handler);
	}
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}

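/*
 * A don't-trap flow is installed as two chained rules: one without a
 * destination, so matching packets continue to the next priority, and
 * one that also forwards a copy to the requested destination.
 */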
static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_dst = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
	if (!IS_ERR(handler)) {
		handler_dst = create_flow_rule(dev, ft_prio,
					       flow_attr, dst);
		if (IS_ERR(handler_dst)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_dst;
		} else {
			list_add(&handler_dst->list, &handler->list);
		}
	}

	return handler;
}
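
/*
 * Leftovers rules catch traffic that no other flow matched: one rule
 * for multicast (dst_mac multicast bit set) and, for ALL_DEFAULT, a
 * chained unicast rule as well.
 */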
enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};

static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val =  {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}

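/*
 * Sniffer flows mirror all traffic: install one catch-all rule in the
 * RX sniffer table and one in the TX sniffer table, chained together.
 */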
static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
							 struct mlx5_ib_flow_prio *ft_rx,
							 struct mlx5_ib_flow_prio *ft_tx,
							 struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_rx;
	struct mlx5_ib_flow_handler *handler_tx;
	int err;
	static const struct ib_flow_attr flow_attr  = {
		.num_of_specs = 0,
		.size = sizeof(flow_attr)
	};

	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
	if (IS_ERR(handler_rx)) {
		err = PTR_ERR(handler_rx);
		goto err;
	}

	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
	if (IS_ERR(handler_tx)) {
		err = PTR_ERR(handler_tx);
		goto err_tx;
	}

	list_add(&handler_tx->list, &handler_rx->list);

	return handler_rx;

err_tx:
	mlx5_del_flow_rules(handler_rx->rule);
	ft_rx->refcount--;
	kfree(handler_rx);
err:
	return ERR_PTR(err);
}

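/*
 * Verbs entry point for flow creation: validate the user command and
 * attributes, resolve the destination (TIR for ingress, wire port for
 * egress), pick the flow table(s) and dispatch to the rule-creation
 * helper that matches the attribute type.
 */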
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain,
					   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
	struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
	size_t min_ucmd_sz, required_ucmd_sz;
	int err;
	int underlay_qpn;

	if (udata && udata->inlen) {
		min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
				sizeof(ucmd_hdr.reserved);
		if (udata->inlen < min_ucmd_sz)
			return ERR_PTR(-EOPNOTSUPP);

		err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
		if (err)
			return ERR_PTR(err);

		/* currently, only one counters data section is supported */
		if (ucmd_hdr.ncounters_data > 1)
			return ERR_PTR(-EINVAL);

		required_ucmd_sz = min_ucmd_sz +
			sizeof(struct mlx5_ib_flow_counters_data) *
			ucmd_hdr.ncounters_data;
		if (udata->inlen > required_ucmd_sz &&
		    !ib_is_udata_cleared(udata, required_ucmd_sz,
					 udata->inlen - required_ucmd_sz))
			return ERR_PTR(-EOPNOTSUPP);

		ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
		if (!ucmd)
			return ERR_PTR(-ENOMEM);

		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
		if (err)
			goto free_ucmd;
	}

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	if (domain != IB_FLOW_DOMAIN_USER ||
	    flow_attr->port > dev->num_ports ||
	    (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
				  IB_FLOW_ATTR_FLAGS_EGRESS))) {
		err = -EINVAL;
		goto free_ucmd;
	}

	if (is_egress &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		err = -EINVAL;
		goto free_ucmd;
	}

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	mutex_lock(&dev->flow_db->lock);

	ft_prio = get_flow_table(dev, flow_attr,
				 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}
	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
		if (IS_ERR(ft_prio_tx)) {
			err = PTR_ERR(ft_prio_tx);
			ft_prio_tx = NULL;
			goto destroy_ft;
		}
	}

	if (is_egress) {
		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
	} else {
		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		if (mqp->flags & MLX5_IB_QP_RSS)
			dst->tir_num = mqp->rss_qp.tirn;
		else
			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
	}

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
			handler = create_dont_trap_rule(dev, ft_prio,
							flow_attr, dst);
		} else {
			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
					mqp->underlay_qpn : 0;
			handler = _create_flow_rule(dev, ft_prio, flow_attr,
						    dst, underlay_qpn, ucmd);
		}
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
						dst);
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
	} else {
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);
	kfree(ucmd);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
	if (ft_prio_tx)
		put_flow_table(dev, ft_prio_tx, false);
unlock:
	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);
free_ucmd:
	kfree(ucmd);
	return ERR_PTR(err);
}

static struct mlx5_ib_flow_prio *
_get_flow_table(struct mlx5_ib_dev *dev,
		struct mlx5_ib_flow_matcher *fs_matcher,
		bool mcast)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio = NULL;
	int max_table_size = 0;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	if (mcast)
		priority = MLX5_IB_FLOW_MCAST_PRIO;
	else
		priority = ib_prio_to_core_prio(fs_matcher->priority, false);

	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
		max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
							       log_max_ft_size));
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					      reformat_l3_tunnel_to_l2) &&
		    !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
		max_table_size = BIT(
			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
		    esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		priority = FDB_BYPASS_PATH;
	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
		max_table_size =
			BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
						       log_max_ft_size));
		priority = fs_matcher->priority;
	}

	max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);

	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
		prio = &dev->flow_db->prios[priority];
	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
		prio = &dev->flow_db->egress_prios[priority];
	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
		prio = &dev->flow_db->fdb;
	else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
		prio = &dev->flow_db->rdma_rx[priority];

	if (!prio)
		return ERR_PTR(-EINVAL);

	if (prio->flow_table)
		return prio;

	return _get_prio(ns, prio, priority, max_table_size,
			 MLX5_FS_MAX_TYPES, flags);
}

static struct mlx5_ib_flow_handler *
_create_raw_flow_rule(struct mlx5_ib_dev *dev,
		      struct mlx5_ib_flow_prio *ft_prio,
		      struct mlx5_flow_destination *dst,
		      struct mlx5_ib_flow_matcher *fs_matcher,
		      struct mlx5_flow_context *flow_context,
		      struct mlx5_flow_act *flow_act,
		      void *cmd_in, int inlen,
		      int dst_num)
{
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	memcpy(spec->match_value, cmd_in, inlen);
	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
	       fs_matcher->mask_len);
	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
	spec->flow_context = *flow_context;

	handler->rule = mlx5_add_flow_rules(ft, spec,
					    flow_act, dst, dst_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;
	handler->dev = dev;
	ft_prio->flow_table = ft;

free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}

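/*
 * A raw flow counts as multicast only when its matcher covers the outer
 * headers and both the value and the mask identify a multicast
 * destination MAC or destination IPv4 address.
 */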
static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
				void *match_v)
{
	void *match_c;
	void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
	void *dmac, *dmac_mask;
	void *ipv4, *ipv4_mask;

	if (!(fs_matcher->match_criteria_enable &
	      (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
		return false;

	match_c = fs_matcher->matcher_mask.match_params;
	match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
					   outer_headers);
	match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);

	dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
			    dmac_47_16);
	dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
				 dmac_47_16);

	if (is_multicast_ether_addr(dmac) &&
	    is_multicast_ether_addr(dmac_mask))
		return true;

	ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

	ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

	if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
	    ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
		return true;

	return false;
}

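/*
 * Add a raw (matcher-based) steering rule: build up to two destinations
 * (TIR, flow table or port, plus an optional counter) and program the
 * rule into the table selected by the matcher's namespace.
 */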
struct mlx5_ib_flow_handler *
mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
			struct mlx5_ib_flow_matcher *fs_matcher,
			struct mlx5_flow_context *flow_context,
			struct mlx5_flow_act *flow_act,
			u32 counter_id,
			void *cmd_in, int inlen, int dest_id,
			int dest_type)
{
	struct mlx5_flow_destination *dst;
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_flow_handler *handler;
	int dst_num = 0;
	bool mcast;
	int err;

	if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
		return ERR_PTR(-EOPNOTSUPP);

	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOMEM);

	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
	mutex_lock(&dev->flow_db->lock);

	ft_prio = _get_flow_table(dev, fs_matcher, mcast);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}

	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
		dst[dst_num].type = dest_type;
		dst[dst_num].tir_num = dest_id;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
		dst[dst_num].ft_num = dest_id;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	} else {
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	}

	dst_num++;

	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dst[dst_num].counter_id = counter_id;
		dst_num++;
	}

	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
					flow_context, flow_act,
					cmd_in, inlen, dst_num);

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db->lock);
	atomic_inc(&fs_matcher->usecnt);
	handler->flow_matcher = fs_matcher;

	kfree(dst);

	return handler;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
unlock:
	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);

	return ERR_PTR(err);
}

static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
	u32 flags = 0;

	if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
		flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;

	return flags;
}

#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED	MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
static struct ib_flow_action *
mlx5_ib_create_flow_action_esp(struct ib_device *device,
			       const struct ib_flow_action_attrs_esp *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dev *mdev = to_mdev(device);
	struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
	struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
	struct mlx5_ib_flow_action *action;
	u64 action_flags;
	u64 flags;
	int err = 0;

	err = uverbs_get_flags64(
		&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
		((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
	if (err)
		return ERR_PTR(err);

	flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);

	/* We currently support only a subset of the standard features: a
	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
	 * (with overlap). Full offload mode isn't supported.
	 */
	if (!attr->keymat || attr->replay || attr->encap ||
	    attr->spi || attr->seq || attr->tfc_pad ||
	    attr->hard_limit_pkts ||
	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->keymat->protocol !=
	    IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
		return ERR_PTR(-EOPNOTSUPP);

	aes_gcm = &attr->keymat->keymat.aes_gcm;

	if (aes_gcm->icv_len != 16 ||
	    aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
		return ERR_PTR(-EOPNOTSUPP);

	action = kmalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return ERR_PTR(-ENOMEM);

	action->esp_aes_gcm.ib_flags = attr->flags;
	memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
	       sizeof(accel_attrs.keymat.aes_gcm.aes_key));
	accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
	memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
	       sizeof(accel_attrs.keymat.aes_gcm.salt));
	memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
	       sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
	accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
	accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
	accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

	accel_attrs.esn = attr->esn;
	if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
		accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;

	action->esp_aes_gcm.ctx =
		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
	if (IS_ERR(action->esp_aes_gcm.ctx)) {
		err = PTR_ERR(action->esp_aes_gcm.ctx);
		goto err_parse;
	}

	action->esp_aes_gcm.ib_flags = attr->flags;

	return &action->ib_action;

err_parse:
	kfree(action);
	return ERR_PTR(err);
}

static int
mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
			       const struct ib_flow_action_attrs_esp *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
	struct mlx5_accel_esp_xfrm_attrs accel_attrs;
	int err = 0;

	if (attr->keymat || attr->replay || attr->encap ||
	    attr->spi || attr->seq || attr->tfc_pad ||
	    attr->hard_limit_pkts ||
	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
		return -EOPNOTSUPP;

	/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
	 * be modified.
	 */
	if (!(maction->esp_aes_gcm.ib_flags &
	      IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
	    attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			   IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
		return -EINVAL;

	memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
	       sizeof(accel_attrs));

	accel_attrs.esn = attr->esn;
	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
	else
		accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

	err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
					 &accel_attrs);
	if (err)
		return err;

	maction->esp_aes_gcm.ib_flags &=
		~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
	maction->esp_aes_gcm.ib_flags |=
		attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;

	return 0;
}

static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
{
	struct mlx5_ib_flow_action *maction = to_mflow_act(action);

	switch (action->type) {
	case IB_FLOW_ACTION_ESP:
		/*
		 * We only support aes_gcm for now, so we implicitly know this
		 * is the underlying crypto.
		 */
		mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
		break;
	case IB_FLOW_ACTION_UNSPECIFIED:
		mlx5_ib_destroy_flow_action_raw(maction);
		break;
	default:
		WARN_ON(true);
		break;
	}

	kfree(maction);
	return 0;
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
	int err;
	u16 uid;

	uid = ibqp->pd ?
		to_mpd(ibqp->pd)->uid : 0;

	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
		return -EOPNOTSUPP;
	}

	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;
	u16 uid;

	uid = ibqp->pd ?
		to_mpd(ibqp->pd)->uid : 0;
	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}

static ssize_t fw_pages_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}
static DEVICE_ATTR_RO(fw_pages);

static ssize_t reg_pages_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static DEVICE_ATTR_RO(reg_pages);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	&dev_attr_fw_pages.attr,
	&dev_attr_reg_pages.attr,
	NULL,
};

static const struct attribute_group mlx5_attr_group = {
	.attrs = mlx5_class_attributes,
};

static void pkey_change_handler(struct work_struct *work)
{
	struct mlx5_ib_port_resources *ports =
		container_of(work, struct mlx5_ib_port_resources,
			     pkey_change_work);

	mutex_lock(&ports->devr->mutex);
	mlx5_ib_gsi_pkey_change(ports->gsi);
	mutex_unlock(&ports->devr->mutex);
}

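/*
 * On a fatal device error, walk every QP on the device, collect the CQs
 * of queues with outstanding work, then fire their completion handlers
 * so consumers can observe the flushed-in-error completions.
 */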
static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_ib_qp *mqp;
	struct mlx5_ib_cq *send_mcq, *recv_mcq;
	struct mlx5_core_cq *mcq;
	struct list_head cq_armed_list;
	unsigned long flags_qp;
	unsigned long flags_cq;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_armed_list);

	/* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_armed_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_armed_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	/* At this point all in-flight post sends were set up to be executed,
	 * as we locked/unlocked the locks above. Now arm all involved CQs.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq, NULL);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

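/*
 * Re-arm the delay-drop timeout in firmware whenever its timer fires;
 * deactivate the feature if the set command fails.
 */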
static void delay_drop_handler(struct work_struct *work)
{
	int err;
	struct mlx5_ib_delay_drop *delay_drop =
		container_of(work, struct mlx5_ib_delay_drop,
			     delay_drop_work);

	atomic_inc(&delay_drop->events_cnt);

	mutex_lock(&delay_drop->lock);
	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
				       delay_drop->timeout);
	if (err) {
		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
			     delay_drop->timeout);
		delay_drop->activate = false;
	}
	mutex_unlock(&delay_drop->lock);
}

static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
				 struct ib_event *ibev)
{
	u8 port = (eqe->data.port.port >> 4) & 0xf;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			schedule_work(&ibdev->delay_drop.delay_drop_work);
		break;
	default: /* do nothing */
		return;
	}
}

static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
			      struct ib_event *ibev)
{
	u8 port = (eqe->data.port.port >> 4) & 0xf;

	ibev->element.port_num = port;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		/* In RoCE, port up/down events are handled in
		 * mlx5_netdev_event().
		 */
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			return -EINVAL;

		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		ibev->event = IB_EVENT_LID_CHANGE;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		ibev->event = IB_EVENT_PKEY_CHANGE;
		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		ibev->event = IB_EVENT_GID_CHANGE;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		ibev->event = IB_EVENT_CLIENT_REREGISTER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

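/*
 * Deferred event handler: translate an mlx5 core event into an ib_event
 * and dispatch it to consumers; a fatal error also marks the device
 * inactive.
 */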
static void mlx5_ib_handle_event(struct work_struct *_work)
{
	struct mlx5_ib_event_work *work =
		container_of(_work, struct mlx5_ib_event_work, work);
	struct mlx5_ib_dev *ibdev;
	struct ib_event ibev;
	bool fatal = false;

	if (work->is_slave) {
		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
		if (!ibdev)
			goto out;
	} else {
		ibdev = work->dev;
	}

	switch (work->event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx5_ib_handle_internal_error(ibdev);
		ibev.element.port_num = (u8)(unsigned long)work->param;
		fatal = true;
		break;
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		if (handle_port_change(ibdev, work->param, &ibev))
			goto out;
		break;
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		handle_general_event(ibdev, work->param, &ibev);
		/* fall through */
	default:
		goto out;
	}

	ibev.device = &ibdev->ib_dev;

	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
		goto out;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
out:
	kfree(work);
}

static int mlx5_ib_event(struct notifier_block *nb,
			 unsigned long event, void *param)
{
	struct mlx5_ib_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5_ib_handle_event);
	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
	work->is_slave = false;
	work->param = param;
	work->event = event;

	queue_work(mlx5_ib_event_wq, &work->work);

	return NOTIFY_OK;
}

static int mlx5_ib_event_slave_port(struct notifier_block *nb,
				    unsigned long event, void *param)
{
	struct mlx5_ib_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5_ib_handle_event);
	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
	work->is_slave = true;
	work->param = param;
	work->event = event;
	queue_work(mlx5_ib_event_wq, &work->work);

	return NOTIFY_OK;
}

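/*
 * Record per-port whether an SMI is exposed: on IB ports the answer
 * comes from the vport context when IB virtualization is enabled, and
 * is assumed true otherwise.
 */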
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

	for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
		dev->mdev->port_caps[port - 1].has_smi = false;
		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
		    MLX5_CAP_PORT_TYPE_IB) {
			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
				err = mlx5_query_hca_vport_context(dev->mdev, 0,
								   port, 0,
								   &vport_ctx);
				if (err) {
					mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
						    port, err);
					return err;
				}
				dev->mdev->port_caps[port - 1].has_smi =
					vport_ctx.has_smi;
			} else {
				dev->mdev->port_caps[port - 1].has_smi = true;
			}
		}
	}
	return 0;
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= dev->num_ports; port++)
		mlx5_query_ext_port_caps(dev, port);
}

static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;

	pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
	if (err) {
		mlx5_ib_warn(dev, "query_port %d failed %d\n",
			     port, err);
		goto out;
	}

	dev->mdev->port_caps[port - 1].pkey_table_len =
		dprops->max_pkeys;
	dev->mdev->port_caps[port - 1].gid_table_len =
		pprops->gid_tbl_len;
	mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
		    port, dprops->max_pkeys, pprops->gid_tbl_len);

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}

static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	/* For representors use port 1, since it is the only native port */
	if (dev->is_rep)
		return __get_port_caps(dev, 1);
	return __get_port_caps(dev, port);
}

static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	if (dev->umrc.qp)
		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
	if (dev->umrc.cq)
		ib_free_cq(dev->umrc.cq);
	if (dev->umrc.pd)
		ib_dealloc_pd(dev->umrc.pd);
}

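/*
 * Create the resources used for UMR (user memory registration) work
 * requests: a PD, a CQ and a dedicated REG_UMR QP brought manually
 * through INIT/RTR/RTS, plus the MR cache that posts to this QP.
 */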
enum {
	MAX_UMR_WR = 128,
};

static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;
	qp->send_cq = init_attr->send_cq;
	qp->recv_cq = init_attr->recv_cq;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp, NULL);
	dev->umrc.qp = NULL;

error_3:
	ib_free_cq(cq);
	dev->umrc.cq = NULL;

error_2:
	ib_dealloc_pd(pd);
	dev->umrc.pd = NULL;

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}

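/*
 * Map the device's reported UMR fence capability to the fence mode used
 * when posting UMR WQEs.
 */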
static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
	switch (umr_fence_cap) {
	case MLX5_CAP_UMR_FENCE_NONE:
		return MLX5_FENCE_MODE_NONE;
	case MLX5_CAP_UMR_FENCE_SMALL:
		return MLX5_FENCE_MODE_INITIATOR_SMALL;
	default:
		return MLX5_FENCE_MODE_STRONG_ORDERING;
	}
}

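/*
 * Allocate the device-wide verbs resources shared by ULPs: PD p0, CQ
 * c0, XRC domains x0/x1, the XRC and basic SRQs s0/s1, and the per-port
 * P_Key change workers. Undone by destroy_dev_resources().
 */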
e126ba97
EC
5000static int create_dev_resources(struct mlx5_ib_resources *devr)
5001{
5002 struct ib_srq_init_attr attr;
5003 struct mlx5_ib_dev *dev;
21a428a0 5004 struct ib_device *ibdev;
bcf4c1ea 5005 struct ib_cq_init_attr cq_attr = {.cqe = 1};
7722f47e 5006 int port;
e126ba97
EC
5007 int ret = 0;
5008
5009 dev = container_of(devr, struct mlx5_ib_dev, devr);
21a428a0 5010 ibdev = &dev->ib_dev;
e126ba97 5011
d16e91da
HE
5012 mutex_init(&devr->mutex);
5013
21a428a0
LR
5014 devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
5015 if (!devr->p0)
5016 return -ENOMEM;
5017
5018 devr->p0->device = ibdev;
e126ba97
EC
5019 devr->p0->uobject = NULL;
5020 atomic_set(&devr->p0->usecnt, 0);
5021
ff23dfa1 5022 ret = mlx5_ib_alloc_pd(devr->p0, NULL);
21a428a0
LR
5023 if (ret)
5024 goto error0;
5025
e39afe3d
LR
5026 devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
5027 if (!devr->c0) {
5028 ret = -ENOMEM;
e126ba97
EC
5029 goto error1;
5030 }
e39afe3d
LR
5031
5032 devr->c0->device = &dev->ib_dev;
e126ba97
EC
5033 atomic_set(&devr->c0->usecnt, 0);
5034
e39afe3d
LR
5035 ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
5036 if (ret)
5037 goto err_create_cq;
5038
ff23dfa1 5039 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
e126ba97
EC
5040 if (IS_ERR(devr->x0)) {
5041 ret = PTR_ERR(devr->x0);
5042 goto error2;
5043 }
5044 devr->x0->device = &dev->ib_dev;
5045 devr->x0->inode = NULL;
5046 atomic_set(&devr->x0->usecnt, 0);
5047 mutex_init(&devr->x0->tgt_qp_mutex);
5048 INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
5049
ff23dfa1 5050 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
e126ba97
EC
5051 if (IS_ERR(devr->x1)) {
5052 ret = PTR_ERR(devr->x1);
5053 goto error3;
5054 }
5055 devr->x1->device = &dev->ib_dev;
5056 devr->x1->inode = NULL;
5057 atomic_set(&devr->x1->usecnt, 0);
5058 mutex_init(&devr->x1->tgt_qp_mutex);
5059 INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
5060
5061 memset(&attr, 0, sizeof(attr));
5062 attr.attr.max_sge = 1;
5063 attr.attr.max_wr = 1;
5064 attr.srq_type = IB_SRQT_XRC;
1a56ff6d 5065 attr.ext.cq = devr->c0;
5066 attr.ext.xrc.xrcd = devr->x0;
5067
5068 devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
5069 if (!devr->s0) {
5070 ret = -ENOMEM;
5071 goto error4;
5072 }
68e326de 5073
5074 devr->s0->device = &dev->ib_dev;
5075 devr->s0->pd = devr->p0;
5076 devr->s0->srq_type = IB_SRQT_XRC;
5077 devr->s0->ext.xrc.xrcd = devr->x0;
1a56ff6d 5078 devr->s0->ext.cq = devr->c0;
5079 ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
5080 if (ret)
5081 goto err_create;
5082
e126ba97 5083 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
1a56ff6d 5084 atomic_inc(&devr->s0->ext.cq->usecnt);
5085 atomic_inc(&devr->p0->usecnt);
5086 atomic_set(&devr->s0->usecnt, 0);
5087
5088 memset(&attr, 0, sizeof(attr));
5089 attr.attr.max_sge = 1;
5090 attr.attr.max_wr = 1;
5091 attr.srq_type = IB_SRQT_BASIC;
5092 devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
5093 if (!devr->s1) {
5094 ret = -ENOMEM;
5095 goto error5;
5096 }
68e326de 5097
5098 devr->s1->device = &dev->ib_dev;
5099 devr->s1->pd = devr->p0;
4aa17b28 5100 devr->s1->srq_type = IB_SRQT_BASIC;
1a56ff6d 5101 devr->s1->ext.cq = devr->c0;
5102
5103 ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
5104 if (ret)
5105 goto error6;
5106
4aa17b28 5107 atomic_inc(&devr->p0->usecnt);
1a56ff6d 5108 atomic_set(&devr->s1->usecnt, 0);
4aa17b28 5109
5110 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
5111 INIT_WORK(&devr->ports[port].pkey_change_work,
5112 pkey_change_handler);
5113 devr->ports[port].devr = devr;
5114 }
5115
5116 return 0;
5117
5118error6:
5119 kfree(devr->s1);
4aa17b28 5120error5:
c4367a26 5121 mlx5_ib_destroy_srq(devr->s0, NULL);
5122err_create:
5123 kfree(devr->s0);
e126ba97 5124error4:
c4367a26 5125 mlx5_ib_dealloc_xrcd(devr->x1, NULL);
e126ba97 5126error3:
c4367a26 5127 mlx5_ib_dealloc_xrcd(devr->x0, NULL);
e126ba97 5128error2:
c4367a26 5129 mlx5_ib_destroy_cq(devr->c0, NULL);
5130err_create_cq:
5131 kfree(devr->c0);
e126ba97 5132error1:
c4367a26 5133 mlx5_ib_dealloc_pd(devr->p0, NULL);
e126ba97 5134error0:
21a428a0 5135 kfree(devr->p0);
5136 return ret;
5137}
5138
5139static void destroy_dev_resources(struct mlx5_ib_resources *devr)
5140{
5141 int port;
5142
c4367a26 5143 mlx5_ib_destroy_srq(devr->s1, NULL);
68e326de 5144 kfree(devr->s1);
c4367a26 5145 mlx5_ib_destroy_srq(devr->s0, NULL);
68e326de 5146 kfree(devr->s0);
5147 mlx5_ib_dealloc_xrcd(devr->x0, NULL);
5148 mlx5_ib_dealloc_xrcd(devr->x1, NULL);
5149 mlx5_ib_destroy_cq(devr->c0, NULL);
e39afe3d 5150 kfree(devr->c0);
c4367a26 5151 mlx5_ib_dealloc_pd(devr->p0, NULL);
21a428a0 5152 kfree(devr->p0);
5153
5154 /* Make sure no P_Key change work items are still executing */
5d8f6a0e 5155 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
7722f47e 5156 cancel_work_sync(&devr->ports[port].pkey_change_work);
5157}
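/*
 * Teardown note: the objects are released in the reverse order of their
 * creation in create_dev_resources() (SRQs, then XRCDs, the CQ and
 * finally the PD), after which any outstanding P_Key change work is
 * flushed.
 */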
5158
5159static u32 get_core_cap_flags(struct ib_device *ibdev,
5160 struct mlx5_hca_vport_context *rep)
5161{
5162 struct mlx5_ib_dev *dev = to_mdev(ibdev);
5163 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
5164 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
5165 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
85c7c014 5166 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
5167 u32 ret = 0;
5168
5169 if (rep->grh_required)
5170 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
5171
e53505a8 5172 if (ll == IB_LINK_LAYER_INFINIBAND)
b02289b3 5173 return ret | RDMA_CORE_PORT_IBA_IB;
e53505a8 5174
85c7c014 5175 if (raw_support)
b02289b3 5176 ret |= RDMA_CORE_PORT_RAW_PACKET;
72cd5717 5177
e53505a8 5178 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
72cd5717 5179 return ret;
5180
5181 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
72cd5717 5182 return ret;
5183
5184 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
5185 ret |= RDMA_CORE_PORT_IBA_ROCE;
5186
5187 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
5188 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
5189
5190 return ret;
5191}
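/*
 * Illustration of the flag composition above (editor's sketch): an
 * Ethernet port on a non-multiport device that supports both RoCE L3
 * types and both RoCE versions accumulates
 *
 *	RDMA_CORE_PORT_RAW_PACKET | RDMA_CORE_PORT_IBA_ROCE |
 *	RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP
 *
 * while an InfiniBand link layer returns early with
 * RDMA_CORE_PORT_IBA_IB (plus RDMA_CORE_CAP_IB_GRH_REQUIRED when the
 * vport context demands a GRH).
 */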
5192
5193static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
5194 struct ib_port_immutable *immutable)
5195{
5196 struct ib_port_attr attr;
5197 struct mlx5_ib_dev *dev = to_mdev(ibdev);
5198 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
b02289b3 5199 struct mlx5_hca_vport_context rep = {0};
5200 int err;
5201
c4550c63 5202 err = ib_query_port(ibdev, port_num, &attr);
5203 if (err)
5204 return err;
5205
5206 if (ll == IB_LINK_LAYER_INFINIBAND) {
5207 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
5208 &rep);
5209 if (err)
5210 return err;
5211 }
5212
5213 immutable->pkey_tbl_len = attr.pkey_tbl_len;
5214 immutable->gid_tbl_len = attr.gid_tbl_len;
b02289b3 5215 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
94de879c 5216 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
5217
5218 return 0;
5219}
5220
5221static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
5222 struct ib_port_immutable *immutable)
5223{
5224 struct ib_port_attr attr;
5225 int err;
5226
5227 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5228
5229 err = ib_query_port(ibdev, port_num, &attr);
5230 if (err)
5231 return err;
5232
5233 immutable->pkey_tbl_len = attr.pkey_tbl_len;
5234 immutable->gid_tbl_len = attr.gid_tbl_len;
5235 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
5236
5237 return 0;
5238}
5239
9abb0d1b 5240static void get_dev_fw_str(struct ib_device *ibdev, char *str)
5241{
5242 struct mlx5_ib_dev *dev =
5243 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
5244 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
5245 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
5246 fw_rev_sub(dev->mdev));
5247}
5248
45f95acd 5249static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
5250{
5251 struct mlx5_core_dev *mdev = dev->mdev;
5252 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
5253 MLX5_FLOW_NAMESPACE_LAG);
5254 struct mlx5_flow_table *ft;
5255 int err;
5256
7c34ec19 5257 if (!ns || !mlx5_lag_is_roce(mdev))
5258 return 0;
5259
5260 err = mlx5_cmd_create_vport_lag(mdev);
5261 if (err)
5262 return err;
5263
5264 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
5265 if (IS_ERR(ft)) {
5266 err = PTR_ERR(ft);
5267 goto err_destroy_vport_lag;
5268 }
5269
9a4ca38d 5270 dev->flow_db->lag_demux_ft = ft;
7c34ec19 5271 dev->lag_active = true;
5272 return 0;
5273
5274err_destroy_vport_lag:
5275 mlx5_cmd_destroy_vport_lag(mdev);
5276 return err;
5277}
5278
45f95acd 5279static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
5280{
5281 struct mlx5_core_dev *mdev = dev->mdev;
5282
5283 if (dev->lag_active) {
5284 dev->lag_active = false;
5285
5286 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
5287 dev->flow_db->lag_demux_ft = NULL;
5288
5289 mlx5_cmd_destroy_vport_lag(mdev);
5290 }
5291}
5292
7fd8aefb 5293static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5294{
5295 int err;
5296
5297 dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
5298 err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
d012f5d6 5299 if (err) {
95579e78 5300 dev->port[port_num].roce.nb.notifier_call = NULL;
5301 return err;
5302 }
5303
5304 return 0;
5305}
5306
7fd8aefb 5307static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5ec8c83e 5308{
5309 if (dev->port[port_num].roce.nb.notifier_call) {
5310 unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
5311 dev->port[port_num].roce.nb.notifier_call = NULL;
5312 }
5313}
5314
e3f1ed1f 5315static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
fc24fc5e 5316{
5317 int err;
5318
5319 err = mlx5_nic_vport_enable_roce(dev->mdev);
5320 if (err)
5321 return err;
e53505a8 5322
45f95acd 5323 err = mlx5_eth_lag_init(dev);
5324 if (err)
5325 goto err_disable_roce;
5326
5327 return 0;
5328
9ef9c640 5329err_disable_roce:
94de879c 5330 mlx5_nic_vport_disable_roce(dev->mdev);
9ef9c640 5331
e53505a8 5332 return err;
5333}
5334
45f95acd 5335static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
fc24fc5e 5336{
45f95acd 5337 mlx5_eth_lag_cleanup(dev);
94de879c 5338 mlx5_nic_vport_disable_roce(dev->mdev);
5339}
5340
e1f24a79 5341struct mlx5_ib_counter {
5342 const char *name;
5343 size_t offset;
5344};
5345
5346#define INIT_Q_COUNTER(_name) \
5347 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
5348
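/*
 * For reference (macro expansion sketch), INIT_Q_COUNTER(out_of_buffer)
 * expands to
 *
 *	{ .name = "out_of_buffer",
 *	  .offset = MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer) }
 *
 * i.e. each table entry pairs the exported stat name with the byte
 * offset of the field of the same name in the query_q_counter_out
 * layout, which is what the query path below reads.
 */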
e1f24a79 5349static const struct mlx5_ib_counter basic_q_cnts[] = {
5350 INIT_Q_COUNTER(rx_write_requests),
5351 INIT_Q_COUNTER(rx_read_requests),
5352 INIT_Q_COUNTER(rx_atomic_requests),
5353 INIT_Q_COUNTER(out_of_buffer),
5354};
5355
e1f24a79 5356static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
5357 INIT_Q_COUNTER(out_of_sequence),
5358};
5359
e1f24a79 5360static const struct mlx5_ib_counter retrans_q_cnts[] = {
5361 INIT_Q_COUNTER(duplicate_request),
5362 INIT_Q_COUNTER(rnr_nak_retry_err),
5363 INIT_Q_COUNTER(packet_seq_err),
5364 INIT_Q_COUNTER(implied_nak_seq_err),
5365 INIT_Q_COUNTER(local_ack_timeout_err),
5366};
5367
5368#define INIT_CONG_COUNTER(_name) \
5369 { .name = #_name, .offset = \
5370 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
5371
5372static const struct mlx5_ib_counter cong_cnts[] = {
5373 INIT_CONG_COUNTER(rp_cnp_ignored),
5374 INIT_CONG_COUNTER(rp_cnp_handled),
5375 INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
5376 INIT_CONG_COUNTER(np_cnp_sent),
5377};
5378
5379static const struct mlx5_ib_counter extended_err_cnts[] = {
5380 INIT_Q_COUNTER(resp_local_length_error),
5381 INIT_Q_COUNTER(resp_cqe_error),
5382 INIT_Q_COUNTER(req_cqe_error),
5383 INIT_Q_COUNTER(req_remote_invalid_request),
5384 INIT_Q_COUNTER(req_remote_access_errors),
5385 INIT_Q_COUNTER(resp_remote_access_errors),
5386 INIT_Q_COUNTER(resp_cqe_flush_error),
5387 INIT_Q_COUNTER(req_cqe_flush_error),
5388};
5389
5390static const struct mlx5_ib_counter roce_accl_cnts[] = {
5391 INIT_Q_COUNTER(roce_adp_retrans),
5392 INIT_Q_COUNTER(roce_adp_retrans_to),
5393 INIT_Q_COUNTER(roce_slow_restart),
5394 INIT_Q_COUNTER(roce_slow_restart_cnps),
5395 INIT_Q_COUNTER(roce_slow_restart_trans),
5396};
5397
5398#define INIT_EXT_PPCNT_COUNTER(_name) \
5399 { .name = #_name, .offset = \
5400 MLX5_BYTE_OFF(ppcnt_reg, \
5401 counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5402
5403static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5404 INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5405};
5406
5407static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
5408{
5409 return MLX5_ESWITCH_MANAGER(mdev) &&
5410 mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
5411 MLX5_ESWITCH_OFFLOADS;
5412}
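/*
 * Note: in switchdev (eswitch offloads) mode the counters are kept as a
 * single per-device set rather than one per port, which is why the
 * alloc/dealloc paths below size their loops with
 *
 *	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
 */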
5413
e1f24a79 5414static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
0837e86a 5415{
3e1f000f 5416 int num_cnt_ports;
aac4492e 5417 int i;
0837e86a 5418
5419 num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
5420
5421 for (i = 0; i < num_cnt_ports; i++) {
921c0f5b 5422 if (dev->port[i].cnts.set_id_valid)
5423 mlx5_core_dealloc_q_counter(dev->mdev,
5424 dev->port[i].cnts.set_id);
5425 kfree(dev->port[i].cnts.names);
5426 kfree(dev->port[i].cnts.offsets);
5427 }
5428}
5429
5430static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5431 struct mlx5_ib_counters *cnts)
5432{
5433 u32 num_counters;
5434
5435 num_counters = ARRAY_SIZE(basic_q_cnts);
5436
5437 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5438 num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5439
5440 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5441 num_counters += ARRAY_SIZE(retrans_q_cnts);
5442
5443 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5444 num_counters += ARRAY_SIZE(extended_err_cnts);
5445
5446 if (MLX5_CAP_GEN(dev->mdev, roce_accl))
5447 num_counters += ARRAY_SIZE(roce_accl_cnts);
5448
e1f24a79 5449 cnts->num_q_counters = num_counters;
7c16f477 5450
5451 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5452 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5453 num_counters += ARRAY_SIZE(cong_cnts);
5454 }
5455 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5456 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5457 num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5458 }
5459 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
5460 if (!cnts->names)
5461 return -ENOMEM;
5462
5463 cnts->offsets = kcalloc(num_counters,
5464 sizeof(cnts->offsets), GFP_KERNEL);
5465 if (!cnts->offsets)
5466 goto err_names;
5467
5468 return 0;
5469
5470err_names:
e1f24a79 5471 kfree(cnts->names);
aac4492e 5472 cnts->names = NULL;
5473 return -ENOMEM;
5474}
5475
5476static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5477 const char **names,
5478 size_t *offsets)
5479{
5480 int i;
5481 int j = 0;
5482
5483 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5484 names[j] = basic_q_cnts[i].name;
5485 offsets[j] = basic_q_cnts[i].offset;
5486 }
5487
5488 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5489 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5490 names[j] = out_of_seq_q_cnts[i].name;
5491 offsets[j] = out_of_seq_q_cnts[i].offset;
5492 }
5493 }
5494
5495 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5496 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5497 names[j] = retrans_q_cnts[i].name;
5498 offsets[j] = retrans_q_cnts[i].offset;
5499 }
5500 }
e1f24a79 5501
5502 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5503 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5504 names[j] = extended_err_cnts[i].name;
5505 offsets[j] = extended_err_cnts[i].offset;
5506 }
5507 }
5508
5509 if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
5510 for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
5511 names[j] = roce_accl_cnts[i].name;
5512 offsets[j] = roce_accl_cnts[i].offset;
5513 }
5514 }
5515
5516 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5517 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5518 names[j] = cong_cnts[i].name;
5519 offsets[j] = cong_cnts[i].offset;
5520 }
5521 }
5522
5523 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5524 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5525 names[j] = ext_ppcnt_cnts[i].name;
5526 offsets[j] = ext_ppcnt_cnts[i].offset;
5527 }
5528 }
5529}
5530
e1f24a79 5531static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
0837e86a 5532{
3e1f000f 5533 int num_cnt_ports;
aac4492e 5534 int err = 0;
0837e86a 5535 int i;
5536 bool is_shared;
5537
5538 is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
3e1f000f 5539 num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
0837e86a 5540
3e1f000f 5541 for (i = 0; i < num_cnt_ports; i++) {
5542 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5543 if (err)
5544 goto err_alloc;
5545
5546 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5547 dev->port[i].cnts.offsets);
7c16f477 5548
5549 err = mlx5_cmd_alloc_q_counter(dev->mdev,
5550 &dev->port[i].cnts.set_id,
5551 is_shared ?
5552 MLX5_SHARED_RESOURCE_UID : 0);
aac4492e 5553 if (err) {
5554 mlx5_ib_warn(dev,
5555 "couldn't allocate queue counter for port %d, err %d\n",
5556 i + 1, err);
5557 goto err_alloc;
0837e86a 5558 }
aac4492e 5559 dev->port[i].cnts.set_id_valid = true;
0837e86a 5560 }
5561 return 0;
5562
5563err_alloc:
5564 mlx5_ib_dealloc_counters(dev);
5565 return err;
5566}
5567
5568static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
5569 u8 port_num)
5570{
5571 return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
5572 &dev->port[port_num].cnts;
5573}
5574
5575/**
5576 * mlx5_ib_get_counters_id - Returns counters id to use for device+port
5577 * @dev: Pointer to mlx5 IB device
5578 * @port_num: Zero based port number
5579 *
5580 * mlx5_ib_get_counters_id() returns the counter set id to use for the
5581 * given device/port combination, in both switchdev and non-switchdev
5582 * modes of the parent device.
5583 */
5584u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
5585{
5586 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
5587
5588 return cnts->set_id;
5589}
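/*
 * Usage sketch (illustrative only): callers pass a zero-based port
 * number, so the counter set id for the first IB port is obtained as
 *
 *	u16 set_id = mlx5_ib_get_counters_id(dev, 0);
 *
 * In switchdev mode this always resolves to the shared port[0] set.
 */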
5590
5591static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5592 u8 port_num)
5593{
7c16f477 5594 struct mlx5_ib_dev *dev = to_mdev(ibdev);
5595 const struct mlx5_ib_counters *cnts;
5596 bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
0ad17a8f 5597
3e1f000f 5598 if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
5599 return NULL;
5600
5601 cnts = get_counters(dev, port_num - 1);
5602
5603 return rdma_alloc_hw_stats_struct(cnts->names,
5604 cnts->num_q_counters +
5605 cnts->num_cong_counters +
5606 cnts->num_ext_ppcnt_counters,
5607 RDMA_HW_STATS_DEFAULT_LIFESPAN);
5608}
5609
aac4492e 5610static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
5dcecbc9 5611 const struct mlx5_ib_counters *cnts,
5612 struct rdma_hw_stats *stats,
5613 u16 set_id)
0ad17a8f 5614{
5615 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5616 void *out;
5617 __be32 val;
e1f24a79 5618 int ret, i;
0ad17a8f 5619
1b9a07ee 5620 out = kvzalloc(outlen, GFP_KERNEL);
5621 if (!out)
5622 return -ENOMEM;
5623
318d535c 5624 ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
5625 if (ret)
5626 goto free;
5627
5628 for (i = 0; i < cnts->num_q_counters; i++) {
5629 val = *(__be32 *)(out + cnts->offsets[i]);
5630 stats->value[i] = (u64)be32_to_cpu(val);
5631 }
7c16f477 5632
5633free:
5634 kvfree(out);
5635 return ret;
5636}
5637
9f876f3d 5638static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5639 const struct mlx5_ib_counters *cnts,
5640 struct rdma_hw_stats *stats)
9f876f3d 5641{
5dcecbc9 5642 int offset = cnts->num_q_counters + cnts->num_cong_counters;
5643 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5644 int ret, i;
5645 void *out;
5646
5647 out = kvzalloc(sz, GFP_KERNEL);
5648 if (!out)
5649 return -ENOMEM;
5650
5651 ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5652 if (ret)
5653 goto free;
5654
5dcecbc9 5655 for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
5656 stats->value[i + offset] =
5657 be64_to_cpup((__be64 *)(out +
5dcecbc9 5658 cnts->offsets[i + offset]));
5659free:
5660 kvfree(out);
5661 return ret;
5662}
5663
5664static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
5665 struct rdma_hw_stats *stats,
5666 u8 port_num, int index)
5667{
5668 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3e1f000f 5669 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
aac4492e 5670 struct mlx5_core_dev *mdev;
e1f24a79 5671 int ret, num_counters;
aac4492e 5672 u8 mdev_port_num;
5673
5674 if (!stats)
5675 return -EINVAL;
5676
5677 num_counters = cnts->num_q_counters +
5678 cnts->num_cong_counters +
5679 cnts->num_ext_ppcnt_counters;
5680
5681 /* q_counters are per IB device, query the master mdev */
5dcecbc9 5682 ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
5683 if (ret)
5684 return ret;
e1f24a79 5685
9f876f3d 5686 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5dcecbc9 5687 ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
5688 if (ret)
5689 return ret;
5690 }
5691
e1f24a79 5692 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5693 mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5694 &mdev_port_num);
5695 if (!mdev) {
5696 /* If the port is not affiliated yet, it is in the down state,
5697 * which has no counters yet, so they would read as zero.
5698 * No need to read from the HCA.
5699 */
5700 goto done;
5701 }
5702 ret = mlx5_lag_query_cong_counters(dev->mdev,
5703 stats->value +
5704 cnts->num_q_counters,
5705 cnts->num_cong_counters,
5706 cnts->offsets +
5707 cnts->num_q_counters);
5708
5709 mlx5_ib_put_native_port_mdev(dev, port_num);
5710 if (ret)
5711 return ret;
5712 }
5713
aac4492e 5714done:
e1f24a79 5715 return num_counters;
5716}
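/*
 * Note: per the ib_device get_hw_stats contract, a non-negative return
 * value is the number of stats values filled in (num_counters here),
 * not 0 on success; negative values signal an error.
 */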
5717
5718static struct rdma_hw_stats *
5719mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
5720{
5721 struct mlx5_ib_dev *dev = to_mdev(counter->device);
5dcecbc9 5722 const struct mlx5_ib_counters *cnts =
3e1f000f 5723 get_counters(dev, counter->port - 1);
5724
5725 /* Q counters are at the beginning of the counters array */
5726 return rdma_alloc_hw_stats_struct(cnts->names,
5727 cnts->num_q_counters,
5728 RDMA_HW_STATS_DEFAULT_LIFESPAN);
5729}
5730
5731static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
5732{
5733 struct mlx5_ib_dev *dev = to_mdev(counter->device);
5734 const struct mlx5_ib_counters *cnts =
5735 get_counters(dev, counter->port - 1);
18d422ce 5736
5dcecbc9 5737 return mlx5_ib_query_q_counters(dev->mdev, cnts,
5738 counter->stats, counter->id);
5739}
5740
5741static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
5742 struct ib_qp *qp)
5743{
5744 struct mlx5_ib_dev *dev = to_mdev(qp->device);
5745 u16 cnt_set_id = 0;
5746 int err;
5747
5748 if (!counter->id) {
5749 err = mlx5_cmd_alloc_q_counter(dev->mdev,
5750 &cnt_set_id,
5751 MLX5_SHARED_RESOURCE_UID);
5752 if (err)
5753 return err;
5754 counter->id = cnt_set_id;
5755 }
5756
5757 err = mlx5_ib_qp_set_counter(qp, counter);
5758 if (err)
5759 goto fail_set_counter;
5760
5761 return 0;
5762
5763fail_set_counter:
5764 mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
5765 counter->id = 0;
5766
5767 return err;
5768}
5769
5770static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
5771{
5772 return mlx5_ib_qp_set_counter(qp, NULL);
5773}
5774
5775static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
5776{
5777 struct mlx5_ib_dev *dev = to_mdev(counter->device);
5778
5779 return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
5780}
5781
5782static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5783 enum rdma_netdev_t type,
5784 struct rdma_netdev_alloc_params *params)
5785{
5786 if (type != RDMA_NETDEV_IPOIB)
f6a8a19b 5787 return -EOPNOTSUPP;
693dfd5a 5788
f6a8a19b 5789 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5790}
5791
5792static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5793{
09b0965e 5794 if (!dev->delay_drop.dir_debugfs)
fe248c3a 5795 return;
5796 debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
5797 dev->delay_drop.dir_debugfs = NULL;
5798}
5799
5800static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5801{
5802 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5803 return;
5804
5805 cancel_work_sync(&dev->delay_drop.delay_drop_work);
5806 delay_drop_debugfs_cleanup(dev);
5807}
5808
5809static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5810 size_t count, loff_t *pos)
5811{
5812 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5813 char lbuf[20];
5814 int len;
5815
5816 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5817 return simple_read_from_buffer(buf, count, pos, lbuf, len);
5818}
5819
5820static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5821 size_t count, loff_t *pos)
5822{
5823 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5824 u32 timeout;
5825 u32 var;
5826
5827 if (kstrtouint_from_user(buf, count, 0, &var))
5828 return -EFAULT;
5829
5830 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5831 1000);
5832 if (timeout != var)
5833 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5834 timeout);
5835
5836 delay_drop->timeout = timeout;
5837
5838 return count;
5839}
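/*
 * Worked example: writing "250" to this debugfs file stores
 * roundup(250, 100) = 300 usec, while anything above
 * MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000 is clamped to that maximum
 * before being applied.
 */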
5840
5841static const struct file_operations fops_delay_drop_timeout = {
5842 .owner = THIS_MODULE,
5843 .open = simple_open,
5844 .write = delay_drop_timeout_write,
5845 .read = delay_drop_timeout_read,
5846};
5847
09b0965e 5848static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
fe248c3a 5849{
09b0965e 5850 struct dentry *root;
fe248c3a
MG
5851
5852 if (!mlx5_debugfs_root)
09b0965e 5853 return;
fe248c3a 5854
5855 root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
5856 dev->delay_drop.dir_debugfs = root;
fe248c3a 5857
5858 debugfs_create_atomic_t("num_timeout_events", 0400, root,
5859 &dev->delay_drop.events_cnt);
5860 debugfs_create_atomic_t("num_rqs", 0400, root,
5861 &dev->delay_drop.rqs_cnt);
5862 debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
5863 &fops_delay_drop_timeout);
5864}
5865
5866static void init_delay_drop(struct mlx5_ib_dev *dev)
5867{
5868 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5869 return;
5870
5871 mutex_init(&dev->delay_drop.lock);
5872 dev->delay_drop.dev = dev;
5873 dev->delay_drop.activate = false;
5874 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5875 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5876 atomic_set(&dev->delay_drop.rqs_cnt, 0);
5877 atomic_set(&dev->delay_drop.events_cnt, 0);
5878
09b0965e 5879 delay_drop_debugfs_init(dev);
5880}
5881
5882static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5883 struct mlx5_ib_multiport_info *mpi)
5884{
5885 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5886 struct mlx5_ib_port *port = &ibdev->port[port_num];
5887 int comps;
5888 int err;
5889 int i;
5890
5891 lockdep_assert_held(&mlx5_ib_multiport_mutex);
5892
5893 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5894
5895 spin_lock(&port->mp.mpi_lock);
5896 if (!mpi->ibdev) {
5897 spin_unlock(&port->mp.mpi_lock);
5898 return;
5899 }
df097a27 5900
5901 mpi->ibdev = NULL;
5902
5903 spin_unlock(&port->mp.mpi_lock);
5904 if (mpi->mdev_events.notifier_call)
5905 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5906 mpi->mdev_events.notifier_call = NULL;
5907 mlx5_remove_netdev_notifier(ibdev, port_num);
5908 spin_lock(&port->mp.mpi_lock);
5909
5910 comps = mpi->mdev_refcnt;
5911 if (comps) {
5912 mpi->unaffiliate = true;
5913 init_completion(&mpi->unref_comp);
5914 spin_unlock(&port->mp.mpi_lock);
5915
5916 for (i = 0; i < comps; i++)
5917 wait_for_completion(&mpi->unref_comp);
5918
5919 spin_lock(&port->mp.mpi_lock);
5920 mpi->unaffiliate = false;
5921 }
5922
5923 port->mp.mpi = NULL;
5924
5925 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5926
5927 spin_unlock(&port->mp.mpi_lock);
5928
5929 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5930
5931 mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5932 /* On failure just log the error; we still need to clean up the
5933 * pointers and add the mpi back to the unaffiliated list.
5934 */
5935 if (err)
5936 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5937 port_num + 1);
5938
95579e78 5939 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
5940}
5941
5942static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5943 struct mlx5_ib_multiport_info *mpi)
5944{
5945 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5946 int err;
5947
5948 lockdep_assert_held(&mlx5_ib_multiport_mutex);
5949
5950 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5951 if (ibdev->port[port_num].mp.mpi) {
5952 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5953 port_num + 1);
5954 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5955 return false;
5956 }
5957
5958 ibdev->port[port_num].mp.mpi = mpi;
5959 mpi->ibdev = ibdev;
df097a27 5960 mpi->mdev_events.notifier_call = NULL;
5961 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5962
5963 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5964 if (err)
5965 goto unbind;
5966
5967 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5968 if (err)
5969 goto unbind;
5970
5971 err = mlx5_add_netdev_notifier(ibdev, port_num);
5972 if (err) {
5973 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5974 port_num + 1);
5975 goto unbind;
5976 }
5977
5978 mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5979 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5980
73eb8f03 5981 mlx5_ib_init_cong_debugfs(ibdev, port_num);
a9e546e7 5982
5983 return true;
5984
5985unbind:
5986 mlx5_ib_unbind_slave_port(ibdev, mpi);
5987 return false;
5988}
5989
5990static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5991{
5992 int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5993 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5994 port_num + 1);
5995 struct mlx5_ib_multiport_info *mpi;
5996 int err;
5997 int i;
5998
5999 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
6000 return 0;
6001
6002 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
6003 &dev->sys_image_guid);
6004 if (err)
6005 return err;
6006
6007 err = mlx5_nic_vport_enable_roce(dev->mdev);
6008 if (err)
6009 return err;
6010
6011 mutex_lock(&mlx5_ib_multiport_mutex);
6012 for (i = 0; i < dev->num_ports; i++) {
6013 bool bound = false;
6014
6015 /* build a stub multiport info struct for the native port. */
6016 if (i == port_num) {
6017 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6018 if (!mpi) {
6019 mutex_unlock(&mlx5_ib_multiport_mutex);
6020 mlx5_nic_vport_disable_roce(dev->mdev);
6021 return -ENOMEM;
6022 }
6023
6024 mpi->is_master = true;
6025 mpi->mdev = dev->mdev;
6026 mpi->sys_image_guid = dev->sys_image_guid;
6027 dev->port[i].mp.mpi = mpi;
6028 mpi->ibdev = dev;
6029 mpi = NULL;
6030 continue;
6031 }
6032
6033 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
6034 list) {
6035 if (dev->sys_image_guid == mpi->sys_image_guid &&
6036 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
6037 bound = mlx5_ib_bind_slave_port(dev, mpi);
6038 }
6039
6040 if (bound) {
6041 dev_dbg(mpi->mdev->device,
6042 "removing port from unaffiliated list.\n");
6043 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
6044 list_del(&mpi->list);
6045 break;
6046 }
6047 }
6048 if (!bound) {
6049 get_port_caps(dev, i + 1);
6050 mlx5_ib_dbg(dev, "no free port found for port %d\n",
6051 i + 1);
6052 }
6053 }
6054
6055 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
6056 mutex_unlock(&mlx5_ib_multiport_mutex);
6057 return err;
6058}
6059
6060static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
6061{
6062 int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6063 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
6064 port_num + 1);
6065 int i;
6066
6067 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
6068 return;
6069
6070 mutex_lock(&mlx5_ib_multiport_mutex);
6071 for (i = 0; i < dev->num_ports; i++) {
6072 if (dev->port[i].mp.mpi) {
6073 /* Destroy the native port stub */
6074 if (i == port_num) {
6075 kfree(dev->port[i].mp.mpi);
6076 dev->port[i].mp.mpi = NULL;
6077 } else {
6078 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
6079 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
6080 }
6081 }
6082 }
6083
6084 mlx5_ib_dbg(dev, "removing from devlist\n");
6085 list_del(&dev->ib_dev_list);
6086 mutex_unlock(&mlx5_ib_multiport_mutex);
6087
6088 mlx5_nic_vport_disable_roce(dev->mdev);
6089}
6090
6091static int var_obj_cleanup(struct ib_uobject *uobject,
6092 enum rdma_remove_reason why,
6093 struct uverbs_attr_bundle *attrs)
6094{
6095 struct mlx5_user_mmap_entry *obj = uobject->object;
6096
6097 rdma_user_mmap_entry_remove(&obj->rdma_entry);
6098 return 0;
6099}
6100
6101static struct mlx5_user_mmap_entry *
6102alloc_var_entry(struct mlx5_ib_ucontext *c)
6103{
6104 struct mlx5_user_mmap_entry *entry;
6105 struct mlx5_var_table *var_table;
6106 u32 page_idx;
6107 int err;
6108
6109 var_table = &to_mdev(c->ibucontext.device)->var_table;
6110 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
6111 if (!entry)
6112 return ERR_PTR(-ENOMEM);
6113
6114 mutex_lock(&var_table->bitmap_lock);
6115 page_idx = find_first_zero_bit(var_table->bitmap,
6116 var_table->num_var_hw_entries);
6117 if (page_idx >= var_table->num_var_hw_entries) {
6118 err = -ENOSPC;
6119 mutex_unlock(&var_table->bitmap_lock);
6120 goto end;
6121 }
6122
6123 set_bit(page_idx, var_table->bitmap);
6124 mutex_unlock(&var_table->bitmap_lock);
6125
6126 entry->address = var_table->hw_start_addr +
6127 (page_idx * var_table->stride_size);
6128 entry->page_idx = page_idx;
6129 entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
6130
6131 err = rdma_user_mmap_entry_insert_range(
6132 &c->ibucontext, &entry->rdma_entry, var_table->stride_size,
6133 MLX5_IB_MMAP_OFFSET_START << 16,
6134 (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1);
6135 if (err)
6136 goto err_insert;
6137
6138 return entry;
6139
6140err_insert:
6141 mutex_lock(&var_table->bitmap_lock);
6142 clear_bit(page_idx, var_table->bitmap);
6143 mutex_unlock(&var_table->bitmap_lock);
6144end:
6145 kfree(entry);
6146 return ERR_PTR(err);
6147}
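/*
 * Sketch of the offset encoding assumed above (editor's reading of the
 * code, not authoritative): the entry is inserted somewhere in the user
 * mmap offset range
 *
 *	[MLX5_IB_MMAP_OFFSET_START << 16,
 *	 (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1]
 *
 * so the upper bits of the page offset identify the mmap command while
 * the low 16 bits index the entry; mlx5_entry_to_mmap_offset() hands
 * the resulting offset back to user space in the alloc handler below.
 */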
6148
6149static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
6150 struct uverbs_attr_bundle *attrs)
6151{
6152 struct ib_uobject *uobj = uverbs_attr_get_uobject(
6153 attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
6154 struct mlx5_ib_ucontext *c;
6155 struct mlx5_user_mmap_entry *entry;
6156 u64 mmap_offset;
6157 u32 length;
6158 int err;
6159
6160 c = to_mucontext(ib_uverbs_get_ucontext(attrs));
6161 if (IS_ERR(c))
6162 return PTR_ERR(c);
6163
6164 entry = alloc_var_entry(c);
6165 if (IS_ERR(entry))
6166 return PTR_ERR(entry);
6167
6168 mmap_offset = mlx5_entry_to_mmap_offset(entry);
6169 length = entry->rdma_entry.npages * PAGE_SIZE;
6170 uobj->object = entry;
6171
6172 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
6173 &mmap_offset, sizeof(mmap_offset));
6174 if (err)
6175 goto err;
6176
6177 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
6178 &entry->page_idx, sizeof(entry->page_idx));
6179 if (err)
6180 goto err;
6181
6182 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
6183 &length, sizeof(length));
6184 if (err)
6185 goto err;
6186
6187 return 0;
6188
6189err:
6190 rdma_user_mmap_entry_remove(&entry->rdma_entry);
6191 return err;
6192}
6193
6194DECLARE_UVERBS_NAMED_METHOD(
6195 MLX5_IB_METHOD_VAR_OBJ_ALLOC,
6196 UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
6197 MLX5_IB_OBJECT_VAR,
6198 UVERBS_ACCESS_NEW,
6199 UA_MANDATORY),
6200 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
6201 UVERBS_ATTR_TYPE(u32),
6202 UA_MANDATORY),
6203 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
6204 UVERBS_ATTR_TYPE(u32),
6205 UA_MANDATORY),
6206 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
6207 UVERBS_ATTR_TYPE(u64),
6208 UA_MANDATORY));
6209
6210DECLARE_UVERBS_NAMED_METHOD_DESTROY(
6211 MLX5_IB_METHOD_VAR_OBJ_DESTROY,
6212 UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
6213 MLX5_IB_OBJECT_VAR,
6214 UVERBS_ACCESS_DESTROY,
6215 UA_MANDATORY));
6216
6217DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
6218 UVERBS_TYPE_ALLOC_IDR(var_obj_cleanup),
6219 &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
6220 &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
6221
6222static bool var_is_supported(struct ib_device *device)
6223{
6224 struct mlx5_ib_dev *dev = to_mdev(device);
6225
6226 return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6227 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
6228}
6229
6230ADD_UVERBS_ATTRIBUTES_SIMPLE(
6231 mlx5_ib_dm,
6232 UVERBS_OBJECT_DM,
6233 UVERBS_METHOD_DM_ALLOC,
6234 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
6235 UVERBS_ATTR_TYPE(u64),
83bb4442 6236 UA_MANDATORY),
6237 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
6238 UVERBS_ATTR_TYPE(u16),
6239 UA_OPTIONAL),
6240 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
6241 enum mlx5_ib_uapi_dm_type,
6242 UA_OPTIONAL));
6243
6244ADD_UVERBS_ATTRIBUTES_SIMPLE(
6245 mlx5_ib_flow_action,
6246 UVERBS_OBJECT_FLOW_ACTION,
6247 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
6248 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
6249 enum mlx5_ib_uapi_flow_action_flags));
c6475a0b 6250
0cbf432d 6251static const struct uapi_definition mlx5_ib_defs[] = {
36e235c8 6252 UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
0cbf432d 6253 UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
8c84660b 6254
6255 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
6256 &mlx5_ib_flow_action),
6257 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
6258 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
6259 UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
6260 {}
6261};
8c84660b 6262
6263static int mlx5_ib_read_counters(struct ib_counters *counters,
6264 struct ib_counters_read_attr *read_attr,
6265 struct uverbs_attr_bundle *attrs)
6266{
6267 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6268 struct mlx5_read_counters_attr mread_attr = {};
6269 struct mlx5_ib_flow_counters_desc *desc;
6270 int ret, i;
6271
6272 mutex_lock(&mcounters->mcntrs_mutex);
6273 if (mcounters->cntrs_max_index > read_attr->ncounters) {
6274 ret = -EINVAL;
6275 goto err_bound;
6276 }
6277
6278 mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
6279 GFP_KERNEL);
6280 if (!mread_attr.out) {
6281 ret = -ENOMEM;
6282 goto err_bound;
6283 }
6284
6285 mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
6286 mread_attr.flags = read_attr->flags;
6287 ret = mcounters->read_counters(counters->device, &mread_attr);
6288 if (ret)
6289 goto err_read;
6290
6291 /* Walk the counters data array and scatter each value into the
6292 * output buffer according to its description/index pair.
6293 */
6294 desc = mcounters->counters_data;
6295 for (i = 0; i < mcounters->ncounters; i++)
6296 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
6297
6298err_read:
6299 kfree(mread_attr.out);
6300err_bound:
6301 mutex_unlock(&mcounters->mcntrs_mutex);
6302 return ret;
6303}
6304
6305static int mlx5_ib_destroy_counters(struct ib_counters *counters)
6306{
6307 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
6308
6309 counters_clear_description(counters);
6310 if (mcounters->hw_cntrs_hndl)
6311 mlx5_fc_destroy(to_mdev(counters->device)->mdev,
6312 mcounters->hw_cntrs_hndl);
6313
6314 kfree(mcounters);
6315
6316 return 0;
6317}
6318
6319static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
6320 struct uverbs_attr_bundle *attrs)
6321{
6322 struct mlx5_ib_mcounters *mcounters;
6323
6324 mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
6325 if (!mcounters)
6326 return ERR_PTR(-ENOMEM);
6327
6328 mutex_init(&mcounters->mcntrs_mutex);
6329
6330 return &mcounters->ibcntrs;
6331}
6332
fb652d32 6333static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
e126ba97 6334{
32f69e4b 6335 mlx5_ib_cleanup_multiport_master(dev);
806b101b 6336 WARN_ON(!xa_empty(&dev->odp_mkeys));
806b101b 6337 cleanup_srcu_struct(&dev->odp_srcu);
4056b12e 6338
50211ec9 6339 WARN_ON(!xa_empty(&dev->sig_mrs));
4056b12e 6340 WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
6341}
6342
fb652d32 6343static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6344{
6345 struct mlx5_core_dev *mdev = dev->mdev;
e126ba97 6346 int err;
32f69e4b 6347 int i;
e126ba97 6348
6349 for (i = 0; i < dev->num_ports; i++) {
6350 spin_lock_init(&dev->port[i].mp.mpi_lock);
95579e78 6351 rwlock_init(&dev->port[i].roce.netdev_lock);
6352 dev->port[i].roce.dev = dev;
6353 dev->port[i].roce.native_port_num = i + 1;
6354 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6355 }
6356
6357 mlx5_ib_internal_fill_odp_caps(dev);
6358
32f69e4b 6359 err = mlx5_ib_init_multiport_master(dev);
e126ba97 6360 if (err)
da796ccb 6361 return err;
e126ba97 6362
6363 err = set_has_smi_cap(dev);
6364 if (err)
6365 return err;
e126ba97 6366
32f69e4b 6367 if (!mlx5_core_mp_enabled(mdev)) {
6368 for (i = 1; i <= dev->num_ports; i++) {
6369 err = get_port_caps(dev, i);
6370 if (err)
6371 break;
6372 }
6373 } else {
6374 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
6375 }
6376 if (err)
6377 goto err_mp;
6378
6379 if (mlx5_use_mad_ifc(dev))
6380 get_ext_port_caps(dev);
e126ba97 6381
e126ba97 6382 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
c6790aa9 6383 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
508562d6 6384 dev->ib_dev.phys_port_cnt = dev->num_ports;
f2f3df55 6385 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
c42260f1 6386 dev->ib_dev.dev.parent = mdev->device;
e126ba97 6387
6388 mutex_init(&dev->cap_mask_mutex);
6389 INIT_LIST_HEAD(&dev->qp_list);
6390 spin_lock_init(&dev->reset_flow_resource_lock);
806b101b 6391 xa_init(&dev->odp_mkeys);
50211ec9 6392 xa_init(&dev->sig_mrs);
fc6a9f86 6393 spin_lock_init(&dev->mkey_lock);
3cc297db 6394
6395 spin_lock_init(&dev->dm.lock);
6396 dev->dm.dev = mdev;
24da0016 6397
6398 err = init_srcu_struct(&dev->odp_srcu);
6399 if (err)
6400 goto err_mp;
3cc297db 6401
16c1975f 6402 return 0;
25c13324 6403
6404err_mp:
6405 mlx5_ib_cleanup_multiport_master(dev);
16c1975f 6406
6407 return -ENOMEM;
6408}
6409
6410static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
6411{
6412 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
6413
6414 if (!dev->flow_db)
6415 return -ENOMEM;
6416
6417 mutex_init(&dev->flow_db->lock);
6418
6419 return 0;
6420}
6421
6422static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
6423{
6424 kfree(dev->flow_db);
6425}
6426
96458233 6427static const struct ib_device_ops mlx5_ib_dev_ops = {
7a154142 6428 .owner = THIS_MODULE,
b9560a41 6429 .driver_id = RDMA_DRIVER_MLX5,
72c6ec18 6430 .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
b9560a41 6431
6432 .add_gid = mlx5_ib_add_gid,
6433 .alloc_mr = mlx5_ib_alloc_mr,
6c984472 6434 .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
96458233
KH
6435 .alloc_pd = mlx5_ib_alloc_pd,
6436 .alloc_ucontext = mlx5_ib_alloc_ucontext,
6437 .attach_mcast = mlx5_ib_mcg_attach,
6438 .check_mr_status = mlx5_ib_check_mr_status,
6439 .create_ah = mlx5_ib_create_ah,
6440 .create_counters = mlx5_ib_create_counters,
6441 .create_cq = mlx5_ib_create_cq,
6442 .create_flow = mlx5_ib_create_flow,
6443 .create_qp = mlx5_ib_create_qp,
6444 .create_srq = mlx5_ib_create_srq,
6445 .dealloc_pd = mlx5_ib_dealloc_pd,
6446 .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
6447 .del_gid = mlx5_ib_del_gid,
6448 .dereg_mr = mlx5_ib_dereg_mr,
6449 .destroy_ah = mlx5_ib_destroy_ah,
6450 .destroy_counters = mlx5_ib_destroy_counters,
6451 .destroy_cq = mlx5_ib_destroy_cq,
6452 .destroy_flow = mlx5_ib_destroy_flow,
6453 .destroy_flow_action = mlx5_ib_destroy_flow_action,
6454 .destroy_qp = mlx5_ib_destroy_qp,
6455 .destroy_srq = mlx5_ib_destroy_srq,
6456 .detach_mcast = mlx5_ib_mcg_detach,
6457 .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
6458 .drain_rq = mlx5_ib_drain_rq,
6459 .drain_sq = mlx5_ib_drain_sq,
11f552e2 6460 .enable_driver = mlx5_ib_enable_driver,
e1b95ae0 6461 .fill_res_entry = mlx5_ib_fill_res_entry,
4061ff7a 6462 .fill_stat_entry = mlx5_ib_fill_stat_entry,
6463 .get_dev_fw_str = get_dev_fw_str,
6464 .get_dma_mr = mlx5_ib_get_dma_mr,
6465 .get_link_layer = mlx5_ib_port_link_layer,
6466 .map_mr_sg = mlx5_ib_map_mr_sg,
6c984472 6467 .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
96458233 6468 .mmap = mlx5_ib_mmap,
dc2316eb 6469 .mmap_free = mlx5_ib_mmap_free,
6470 .modify_cq = mlx5_ib_modify_cq,
6471 .modify_device = mlx5_ib_modify_device,
6472 .modify_port = mlx5_ib_modify_port,
6473 .modify_qp = mlx5_ib_modify_qp,
6474 .modify_srq = mlx5_ib_modify_srq,
6475 .poll_cq = mlx5_ib_poll_cq,
6476 .post_recv = mlx5_ib_post_recv,
6477 .post_send = mlx5_ib_post_send,
6478 .post_srq_recv = mlx5_ib_post_srq_recv,
6479 .process_mad = mlx5_ib_process_mad,
6480 .query_ah = mlx5_ib_query_ah,
6481 .query_device = mlx5_ib_query_device,
6482 .query_gid = mlx5_ib_query_gid,
6483 .query_pkey = mlx5_ib_query_pkey,
6484 .query_qp = mlx5_ib_query_qp,
6485 .query_srq = mlx5_ib_query_srq,
6486 .read_counters = mlx5_ib_read_counters,
6487 .reg_user_mr = mlx5_ib_reg_user_mr,
6488 .req_notify_cq = mlx5_ib_arm_cq,
6489 .rereg_user_mr = mlx5_ib_rereg_user_mr,
6490 .resize_cq = mlx5_ib_resize_cq,
6491
6492 INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
e39afe3d 6493 INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
21a428a0 6494 INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
68e326de 6495 INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
a2a074ef 6496 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
6497};
6498
6499static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
6500 .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
6501 .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
6502};
6503
6504static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
6505 .rdma_netdev_get_params = mlx5_ib_rn_get_params,
6506};
6507
6508static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
6509 .get_vf_config = mlx5_ib_get_vf_config,
9c0015ef 6510 .get_vf_guid = mlx5_ib_get_vf_guid,
6511 .get_vf_stats = mlx5_ib_get_vf_stats,
6512 .set_vf_guid = mlx5_ib_set_vf_guid,
6513 .set_vf_link_state = mlx5_ib_set_vf_link_state,
6514};
6515
6516static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
6517 .alloc_mw = mlx5_ib_alloc_mw,
6518 .dealloc_mw = mlx5_ib_dealloc_mw,
6519};
6520
6521static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
6522 .alloc_xrcd = mlx5_ib_alloc_xrcd,
6523 .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
6524};
6525
6526static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
6527 .alloc_dm = mlx5_ib_alloc_dm,
6528 .dealloc_dm = mlx5_ib_dealloc_dm,
6529 .reg_dm_mr = mlx5_ib_reg_dm_mr,
6530};
6531
6532static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
6533{
6534 struct mlx5_core_dev *mdev = dev->mdev;
6535 struct mlx5_var_table *var_table = &dev->var_table;
6536 u8 log_doorbell_bar_size;
6537 u8 log_doorbell_stride;
6538 u64 bar_size;
6539
6540 log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
6541 log_doorbell_bar_size);
6542 log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
6543 log_doorbell_stride);
6544 var_table->hw_start_addr = dev->mdev->bar_addr +
6545 MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
6546 doorbell_bar_offset);
6547 bar_size = (1ULL << log_doorbell_bar_size) * 4096;
6548 var_table->stride_size = 1ULL << log_doorbell_stride;
685eff51 6549 var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
6550 mutex_init(&var_table->bitmap_lock);
6551 var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
6552 GFP_KERNEL);
6553 return (var_table->bitmap) ? 0 : -ENOMEM;
6554}
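/*
 * Worked example (hypothetical capability values): with
 * log_doorbell_bar_size = 4 and log_doorbell_stride = 12 the
 * computation above yields
 *
 *	bar_size = (1ULL << 4) * 4096 = 64K
 *	stride   =  1ULL << 12        = 4K
 *	entries  =  64K / 4K          = 16
 *
 * so the bitmap ends up tracking 16 VAR doorbell slots carved out of
 * the BAR starting at doorbell_bar_offset.
 */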
6555
6556static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
6557{
6558 bitmap_free(dev->var_table.bitmap);
6559}
6560
fb652d32 6561static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
6562{
6563 struct mlx5_core_dev *mdev = dev->mdev;
6564 int err;
6565
6566 dev->ib_dev.uverbs_cmd_mask =
6567 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
6568 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
6569 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
6570 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
6571 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
6572 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
6573 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
e126ba97 6574 (1ull << IB_USER_VERBS_CMD_REG_MR) |
56e11d62 6575 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
6576 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
6577 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
6578 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
6579 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
6580 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
6581 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
6582 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
6583 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
6584 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
6585 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
6586 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
6587 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
6588 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
6589 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
6590 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
6591 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
6592 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
1707cb4a 6593 dev->ib_dev.uverbs_ex_cmd_mask =
6594 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
6595 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
7d29f349 6596 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
b0e9df6d 6597 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
6598 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
6599 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
6600 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
6601
6602 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
6603 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
6604 ib_set_device_ops(&dev->ib_dev,
6605 &mlx5_ib_dev_ipoib_enhanced_ops);
8e959601 6606
6607 if (mlx5_core_is_pf(mdev))
6608 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
7c2344c3 6609
6610 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
6611
d2370e0a 6612 if (MLX5_CAP_GEN(mdev, imaicl)) {
6613 dev->ib_dev.uverbs_cmd_mask |=
6614 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
6615 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
96458233 6616 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
6617 }
6618
938fe83c 6619 if (MLX5_CAP_GEN(mdev, xrc)) {
6620 dev->ib_dev.uverbs_cmd_mask |=
6621 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
6622 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
96458233 6623 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
6624 }
6625
6626 if (MLX5_CAP_DEV_MEM(mdev, memic) ||
6627 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6628 MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
96458233 6629 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
24da0016 6630
dfb631a1 6631 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
6632 MLX5_ACCEL_IPSEC_CAP_DEVICE)
6633 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
96458233 6634 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
81e30880 6635
6636 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
6637 dev->ib_dev.driver_def = mlx5_ib_defs;
81e30880 6638
6639 err = init_node_data(dev);
6640 if (err)
16c1975f 6641 return err;
e126ba97 6642
c8b89924 6643 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
6644 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
6645 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
a560f1d9 6646 mutex_init(&dev->lb.mutex);
c8b89924 6647
6648 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
6649 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
6650 err = mlx5_ib_init_var_table(dev);
6651 if (err)
6652 return err;
6653 }
6654
6655 dev->ib_dev.use_cq_dim = true;
6656
6657 return 0;
6658}
6659
6660static const struct ib_device_ops mlx5_ib_dev_port_ops = {
6661 .get_port_immutable = mlx5_port_immutable,
6662 .query_port = mlx5_ib_query_port,
6663};
6664
6665static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
6666{
96458233 6667 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
6668 return 0;
6669}
6670
6671static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6672 .get_port_immutable = mlx5_port_rep_immutable,
6673 .query_port = mlx5_ib_rep_query_port,
6674};
6675
b5a498ba 6676static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
8e6efa3a 6677{
96458233 6678 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
6679 return 0;
6680}
6681
6682static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6683 .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6684 .create_wq = mlx5_ib_create_wq,
6685 .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6686 .destroy_wq = mlx5_ib_destroy_wq,
6687 .get_netdev = mlx5_ib_get_netdev,
6688 .modify_wq = mlx5_ib_modify_wq,
6689};
6690
e3f1ed1f 6691static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
8e6efa3a 6692{
e3f1ed1f 6693 u8 port_num;
8e6efa3a 6694
6695 dev->ib_dev.uverbs_ex_cmd_mask |=
6696 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6697 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6698 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6699 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6700 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
96458233 6701 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
8e6efa3a 6702
6703 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6704
26628e2d 6705 /* Register only for native ports */
6706 return mlx5_add_netdev_notifier(dev, port_num);
6707}
6708
6709static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6710{
6711 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6712
6713 mlx5_remove_netdev_notifier(dev, port_num);
6714}
6715
b5a498ba 6716static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
6717{
6718 struct mlx5_core_dev *mdev = dev->mdev;
6719 enum rdma_link_layer ll;
6720 int port_type_cap;
6721 int err = 0;
8e6efa3a 6722
6723 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6724 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6725
6726 if (ll == IB_LINK_LAYER_ETHERNET)
e3f1ed1f 6727 err = mlx5_ib_stage_common_roce_init(dev);
8e6efa3a
MB
6728
6729 return err;
6730}
6731
b5a498ba 6732static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
6733{
6734 mlx5_ib_stage_common_roce_cleanup(dev);
6735}
6736
6737static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6738{
6739 struct mlx5_core_dev *mdev = dev->mdev;
6740 enum rdma_link_layer ll;
6741 int port_type_cap;
6742 int err;
6743
6744 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6745 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6746
fc24fc5e 6747 if (ll == IB_LINK_LAYER_ETHERNET) {
e3f1ed1f 6748 err = mlx5_ib_stage_common_roce_init(dev);
6749 if (err)
6750 return err;
7fd8aefb 6751
e3f1ed1f 6752 err = mlx5_enable_eth(dev);
fc24fc5e 6753 if (err)
8e6efa3a 6754 goto cleanup;
6755 }
6756
16c1975f 6757 return 0;
6758cleanup:
6759 mlx5_ib_stage_common_roce_cleanup(dev);
6760
6761 return err;
16c1975f 6762}
e126ba97 6763
6764static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6765{
6766 struct mlx5_core_dev *mdev = dev->mdev;
6767 enum rdma_link_layer ll;
6768 int port_type_cap;
e126ba97 6769
6770 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6771 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6772
6773 if (ll == IB_LINK_LAYER_ETHERNET) {
6774 mlx5_disable_eth(dev);
8e6efa3a 6775 mlx5_ib_stage_common_roce_cleanup(dev);
45bded2c 6776 }
16c1975f 6777}
6aec21f6 6778
fb652d32 6779static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
6780{
6781 return create_dev_resources(&dev->devr);
6782}
6783
fb652d32 6784static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6785{
6786 destroy_dev_resources(&dev->devr);
6787}
6788
6789static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6790{
6791 return mlx5_ib_odp_init_one(dev);
6792}
4a2da0b8 6793
f3ffed0c 6794static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
d5d284b8
SM
6795{
6796 mlx5_ib_odp_cleanup_one(dev);
6797}
6798
96458233
KH
6799static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6800 .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6801 .get_hw_stats = mlx5_ib_get_hw_stats,
45842fc6
MZ
6802 .counter_bind_qp = mlx5_ib_counter_bind_qp,
6803 .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
6804 .counter_dealloc = mlx5_ib_counter_dealloc,
18d422ce
MZ
6805 .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
6806 .counter_update_stats = mlx5_ib_counter_update_stats,
96458233
KH
6807};
6808
fb652d32 6809static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
16c1975f 6810{
5e1e7612 6811 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
96458233 6812 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
5e1e7612
MB
6813
6814 return mlx5_ib_alloc_counters(dev);
6815 }
16c1975f
MB
6816
6817 return 0;
6818}
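
/*
 * HW stats and per-QP counter ops are wired up only when the device
 * reports a nonzero max_qp_cnt capability; otherwise this stage is a
 * no-op and no counters are allocated (mirrored in the cleanup below).
 */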
6819
fb652d32 6820static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
16c1975f
MB
6821{
6822 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6823 mlx5_ib_dealloc_counters(dev);
6824}
6825
6826static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6827{
73eb8f03
GKH
6828 mlx5_ib_init_cong_debugfs(dev,
6829 mlx5_core_native_port_num(dev->mdev) - 1);
6830 return 0;
16c1975f
MB
6831}
6832
6833static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6834{
a9e546e7
PP
6835 mlx5_ib_cleanup_cong_debugfs(dev,
6836 mlx5_core_native_port_num(dev->mdev) - 1);
16c1975f
MB
6837}
6838
6839static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6840{
5fe9dec0 6841 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
444261ca 6842 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
16c1975f
MB
6843}
6844
6845static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6846{
6847 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6848}
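
/*
 * mlx5_get_uars_page() returns an ERR_PTR() on failure, so
 * PTR_ERR_OR_ZERO() above maps success to 0 and failure to the encoded
 * errno without an explicit IS_ERR() branch.
 */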
6849
fb652d32 6850static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
16c1975f
MB
6851{
6852 int err;
5fe9dec0
EC
6853
6854 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6855 if (err)
16c1975f 6856 return err;
5fe9dec0
EC
6857
6858 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6859 if (err)
16c1975f 6860 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
5fe9dec0 6861
16c1975f
MB
6862 return err;
6863}
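
/*
 * If the fast-path bfreg cannot be allocated, the regular bfreg
 * allocated just above must be released again so the stage fails
 * cleanly without leaking it.
 */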
0837e86a 6864
fb652d32 6865static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
16c1975f
MB
6866{
6867 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6868 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6869}
e126ba97 6870
fb652d32 6871static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
16c1975f 6872{
e349f858
JG
6873 const char *name;
6874
508a523f 6875 rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
7c34ec19 6876 if (!mlx5_lag_is_roce(dev->mdev))
e349f858
JG
6877 name = "mlx5_%d";
6878 else
6879 name = "mlx5_bond_%d";
ea4baf7f 6880 return ib_register_device(&dev->ib_dev, name);
16c1975f
MB
6881}
6882
fb652d32 6883static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
16c1975f 6884{
42cea83f 6885 destroy_umrc_res(dev);
16c1975f
MB
6886}
6887
fb652d32 6888static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
16c1975f 6889{
42cea83f 6890 ib_unregister_device(&dev->ib_dev);
16c1975f
MB
6891}
6892
fb652d32 6893static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
16c1975f 6894{
42cea83f 6895 return create_umr_res(dev);
16c1975f
MB
6896}
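
/*
 * UMR QP resources are deliberately split around device registration:
 * create_umr_res() runs in POST_IB_REG_UMR, after ib_register_device(),
 * while destroy_umrc_res() is the PRE_IB_REG_UMR cleanup hook, i.e. it
 * runs after ib_unregister_device() on teardown, since stages unwind in
 * reverse order.
 */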
6897
6898static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6899{
03404e8a
MG
6900 init_delay_drop(dev);
6901
16c1975f
MB
6902 return 0;
6903}
6904
6905static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6906{
6907 cancel_delay_drop(dev);
6908}
6909
df097a27
SM
6910static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6911{
6912 dev->mdev_events.notifier_call = mlx5_ib_event;
6913 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6914 return 0;
6915}
6916
6917static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6918{
6919 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6920}
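
/*
 * dev->mdev_events subscribes mlx5_ib_event() to port and device
 * events coming from mlx5_core for as long as the device is alive;
 * this stage has no failure mode.
 */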
6921
81773ce5
LR
6922static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6923{
6924 int uid;
6925
fb98153b 6926 uid = mlx5_ib_devx_create(dev, false);
e337dd53 6927 if (uid > 0) {
81773ce5 6928 dev->devx_whitelist_uid = uid;
e337dd53
YH
6929 mlx5_ib_devx_init_event_table(dev);
6930 }
81773ce5
LR
6931
6932 return 0;
6933}

6934static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6935{
e337dd53
YH
6936 if (dev->devx_whitelist_uid) {
6937 mlx5_ib_devx_cleanup_event_table(dev);
81773ce5 6938 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
e337dd53 6939 }
81773ce5
LR
6940}
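
/*
 * DEVX whitelist setup is best effort: mlx5_ib_stage_devx_init()
 * returns 0 even when no uid could be created, in which case the DEVX
 * event table is left uninitialized and skipped again on cleanup.
 */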
6941
11f552e2
MG
6942int mlx5_ib_enable_driver(struct ib_device *dev)
6943{
6944 struct mlx5_ib_dev *mdev = to_mdev(dev);
6945 int ret;
6946
6947 ret = mlx5_ib_test_wc(mdev);
6948 mlx5_ib_dbg(mdev, "Write-Combining %s\n",
6949 mdev->wc_support ? "supported" : "not supported");
6950
6951 return ret;
6952}
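
/*
 * The Write-Combining probe runs once when the driver is enabled: a
 * probe failure fails mlx5_ib_enable_driver() itself, while the probed
 * result is recorded in mdev->wc_support and only steers later UAR
 * mapping decisions.
 */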
6953
b5ca15ad
MB
6954void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6955 const struct mlx5_ib_profile *profile,
6956 int stage)
16c1975f 6957{
4cca96a8
PP
6958 dev->ib_active = false;
6959
16c1975f
MB
6960 /* Number of stages to clean up */
6961 while (stage) {
6962 stage--;
6963 if (profile->stage[stage].cleanup)
6964 profile->stage[stage].cleanup(dev);
6965 }
4a6dc855 6966
da796ccb 6967 kfree(dev->port);
4a6dc855 6968 ib_dealloc_device(&dev->ib_dev);
16c1975f 6969}
e126ba97 6970
b5ca15ad
MB
6971void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6972 const struct mlx5_ib_profile *profile)
16c1975f 6973{
16c1975f
MB
6974 int err;
6975 int i;
5fe9dec0 6976
16c1975f
MB
6977 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6978 if (profile->stage[i].init) {
6979 err = profile->stage[i].init(dev);
6980 if (err)
6981 goto err_out;
6982 }
6983 }
0837e86a 6984
16c1975f
MB
6985 dev->profile = profile;
6986 dev->ib_active = true;
6aec21f6 6987
16c1975f 6988 return dev;
e126ba97 6989
16c1975f
MB
6990err_out:
6991 __mlx5_ib_remove(dev, profile, i);
fc24fc5e 6992
16c1975f
MB
6993 return NULL;
6994}
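
/*
 * On a stage failure, i still indexes the failed stage, so
 * __mlx5_ib_remove(dev, profile, i) above unwinds exactly the i stages
 * that completed: its loop runs the cleanup hooks for stages i-1 down
 * to 0.
 */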
0837e86a 6995
16c1975f
MB
6996static const struct mlx5_ib_profile pf_profile = {
6997 STAGE_CREATE(MLX5_IB_STAGE_INIT,
6998 mlx5_ib_stage_init_init,
6999 mlx5_ib_stage_init_cleanup),
9a4ca38d
MB
7000 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
7001 mlx5_ib_stage_flow_db_init,
7002 mlx5_ib_stage_flow_db_cleanup),
16c1975f
MB
7003 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
7004 mlx5_ib_stage_caps_init,
f164be8c 7005 mlx5_ib_stage_caps_cleanup),
8e6efa3a
MB
7006 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
7007 mlx5_ib_stage_non_default_cb,
7008 NULL),
16c1975f
MB
7009 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
7010 mlx5_ib_stage_roce_init,
7011 mlx5_ib_stage_roce_cleanup),
f3da6577
LR
7012 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
7013 mlx5_init_srq_table,
7014 mlx5_cleanup_srq_table),
16c1975f
MB
7015 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
7016 mlx5_ib_stage_dev_res_init,
7017 mlx5_ib_stage_dev_res_cleanup),
df097a27
SM
7018 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
7019 mlx5_ib_stage_dev_notifier_init,
7020 mlx5_ib_stage_dev_notifier_cleanup),
16c1975f
MB
7021 STAGE_CREATE(MLX5_IB_STAGE_ODP,
7022 mlx5_ib_stage_odp_init,
d5d284b8 7023 mlx5_ib_stage_odp_cleanup),
16c1975f
MB
7024 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
7025 mlx5_ib_stage_counters_init,
7026 mlx5_ib_stage_counters_cleanup),
7027 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
7028 mlx5_ib_stage_cong_debugfs_init,
7029 mlx5_ib_stage_cong_debugfs_cleanup),
7030 STAGE_CREATE(MLX5_IB_STAGE_UAR,
7031 mlx5_ib_stage_uar_init,
7032 mlx5_ib_stage_uar_cleanup),
7033 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
7034 mlx5_ib_stage_bfrag_init,
7035 mlx5_ib_stage_bfrag_cleanup),
42cea83f
MB
7036 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
7037 NULL,
7038 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
81773ce5
LR
7039 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
7040 mlx5_ib_stage_devx_init,
7041 mlx5_ib_stage_devx_cleanup),
16c1975f
MB
7042 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
7043 mlx5_ib_stage_ib_reg_init,
7044 mlx5_ib_stage_ib_reg_cleanup),
42cea83f
MB
7045 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
7046 mlx5_ib_stage_post_ib_reg_umr_init,
7047 NULL),
16c1975f
MB
7048 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
7049 mlx5_ib_stage_delay_drop_init,
7050 mlx5_ib_stage_delay_drop_cleanup),
16c1975f 7051};
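
/*
 * For reference, STAGE_CREATE() (see mlx5_ib.h) is a designated
 * initializer helper along these lines:
 *
 *	#define STAGE_CREATE(_stage, _init, _cleanup) \
 *		.stage[_stage] = {.init = _init, .cleanup = _cleanup}
 *
 * so a profile is a sparse array indexed by enum mlx5_ib_stages, and a
 * NULL init or cleanup hook simply makes that direction a no-op.
 */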
e126ba97 7052
b5a498ba 7053const struct mlx5_ib_profile raw_eth_profile = {
b5ca15ad
MB
7054 STAGE_CREATE(MLX5_IB_STAGE_INIT,
7055 mlx5_ib_stage_init_init,
7056 mlx5_ib_stage_init_cleanup),
7057 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
7058 mlx5_ib_stage_flow_db_init,
7059 mlx5_ib_stage_flow_db_cleanup),
7060 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
7061 mlx5_ib_stage_caps_init,
f164be8c 7062 mlx5_ib_stage_caps_cleanup),
b5ca15ad 7063 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
b5a498ba 7064 mlx5_ib_stage_raw_eth_non_default_cb,
b5ca15ad
MB
7065 NULL),
7066 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
b5a498ba
MG
7067 mlx5_ib_stage_raw_eth_roce_init,
7068 mlx5_ib_stage_raw_eth_roce_cleanup),
f3da6577
LR
7069 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
7070 mlx5_init_srq_table,
7071 mlx5_cleanup_srq_table),
b5ca15ad
MB
7072 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
7073 mlx5_ib_stage_dev_res_init,
7074 mlx5_ib_stage_dev_res_cleanup),
df097a27
SM
7075 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
7076 mlx5_ib_stage_dev_notifier_init,
7077 mlx5_ib_stage_dev_notifier_cleanup),
b5ca15ad
MB
7078 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
7079 mlx5_ib_stage_counters_init,
7080 mlx5_ib_stage_counters_cleanup),
7081 STAGE_CREATE(MLX5_IB_STAGE_UAR,
7082 mlx5_ib_stage_uar_init,
7083 mlx5_ib_stage_uar_cleanup),
7084 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
7085 mlx5_ib_stage_bfrag_init,
7086 mlx5_ib_stage_bfrag_cleanup),
03fe2deb
DM
7087 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
7088 NULL,
7089 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
7f575103
MB
7090 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
7091 mlx5_ib_stage_devx_init,
7092 mlx5_ib_stage_devx_cleanup),
b5ca15ad
MB
7093 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
7094 mlx5_ib_stage_ib_reg_init,
7095 mlx5_ib_stage_ib_reg_cleanup),
03fe2deb
DM
7096 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
7097 mlx5_ib_stage_post_ib_reg_umr_init,
7098 NULL),
b5ca15ad
MB
7099};
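
/*
 * Compared with pf_profile, the raw Ethernet profile substitutes the
 * raw_eth callbacks for the NON_DEFAULT_CB and ROCE stages and omits
 * the ODP, CONG_DEBUGFS and DELAY_DROP stages entirely.
 */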
7100
e3f1ed1f 7101static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
32f69e4b
DJ
7102{
7103 struct mlx5_ib_multiport_info *mpi;
7104 struct mlx5_ib_dev *dev;
7105 bool bound = false;
7106 int err;
7107
7108 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
7109 if (!mpi)
7110 return NULL;
7111
7112 mpi->mdev = mdev;
7113
7114 err = mlx5_query_nic_vport_system_image_guid(mdev,
7115 &mpi->sys_image_guid);
7116 if (err) {
7117 kfree(mpi);
7118 return NULL;
7119 }
7120
7121 mutex_lock(&mlx5_ib_multiport_mutex);
7122 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
7123 if (dev->sys_image_guid == mpi->sys_image_guid)
7124 bound = mlx5_ib_bind_slave_port(dev, mpi);
7125
7126 if (bound) {
7127 rdma_roce_rescan_device(&dev->ib_dev);
7128 break;
7129 }
7130 }
7131
7132 if (!bound) {
7133 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
c42260f1
VP
7134 dev_dbg(mdev->device,
7135 "no suitable IB device found to bind to, added to unaffiliated list.\n");
32f69e4b
DJ
7136 }
7137 mutex_unlock(&mlx5_ib_multiport_mutex);
7138
7139 return mpi;
7140}
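
/*
 * A multiport slave is matched to an existing IB device by
 * sys_image_guid, and a successful bind triggers a RoCE GID table
 * rescan. If no master is bound yet, the mpi is parked on
 * mlx5_ib_unaffiliated_port_list to be picked up when the master
 * device probes.
 */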
7141
16c1975f
MB
7142static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
7143{
94de879c 7144 const struct mlx5_ib_profile *profile;
32f69e4b 7145 enum rdma_link_layer ll;
b5ca15ad 7146 struct mlx5_ib_dev *dev;
32f69e4b 7147 int port_type_cap;
da796ccb 7148 int num_ports;
32f69e4b 7149
b5ca15ad
MB
7150 printk_once(KERN_INFO "%s", mlx5_version);
7151
f0666f1f 7152 if (MLX5_ESWITCH_MANAGER(mdev) &&
f6455de0 7153 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
5fb58c9e
MB
7154 if (!mlx5_core_mp_enabled(mdev))
7155 mlx5_ib_register_vport_reps(mdev);
f0666f1f
BW
7156 return mdev;
7157 }
7158
32f69e4b
DJ
7159 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
7160 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
7161
e3f1ed1f
LR
7162 if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
7163 return mlx5_ib_add_slave_port(mdev);
32f69e4b 7164
da796ccb
MB
7165 num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
7166 MLX5_CAP_GEN(mdev, num_vhca_ports));
459cc69f 7167 dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
b5ca15ad
MB
7168 if (!dev)
7169 return NULL;
da796ccb
MB
7170 dev->port = kcalloc(num_ports, sizeof(*dev->port),
7171 GFP_KERNEL);
7172 if (!dev->port) {
a5c9c299 7173 ib_dealloc_device(&dev->ib_dev);
da796ccb
MB
7174 return NULL;
7175 }
b5ca15ad
MB
7176
7177 dev->mdev = mdev;
da796ccb 7178 dev->num_ports = num_ports;
b5ca15ad 7179
94de879c
MG
7180 if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
7181 profile = &raw_eth_profile;
7182 else
7183 profile = &pf_profile;
7184
7185 return __mlx5_ib_add(dev, profile);
e126ba97
EC
7186}
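
/*
 * mlx5_ib_add() therefore has three outcomes: switchdev-mode devices
 * register vport representors and return the mdev itself as context,
 * multiport Ethernet slaves return an mlx5_ib_multiport_info, and
 * everything else gets a full mlx5_ib_dev built from either pf_profile
 * or raw_eth_profile (the latter when RoCE is disabled on an Ethernet
 * port). mlx5_ib_remove() below keys off the same distinctions.
 */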
7187
9603b61d 7188static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
e126ba97 7189{
32f69e4b
DJ
7190 struct mlx5_ib_multiport_info *mpi;
7191 struct mlx5_ib_dev *dev;
7192
f0666f1f
BW
7193 if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
7194 mlx5_ib_unregister_vport_reps(mdev);
7195 return;
7196 }
7197
32f69e4b
DJ
7198 if (mlx5_core_is_mp_slave(mdev)) {
7199 mpi = context;
7200 mutex_lock(&mlx5_ib_multiport_mutex);
7201 if (mpi->ibdev)
7202 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
7203 list_del(&mpi->list);
7204 mutex_unlock(&mlx5_ib_multiport_mutex);
5d44adeb 7205 kfree(mpi);
32f69e4b
DJ
7206 return;
7207 }
6aec21f6 7208
32f69e4b 7209 dev = context;
f0666f1f 7210 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
e126ba97
EC
7211}
7212
9603b61d
JM
7213static struct mlx5_interface mlx5_ib_interface = {
7214 .add = mlx5_ib_add,
7215 .remove = mlx5_ib_remove,
64613d94 7216 .protocol = MLX5_INTERFACE_PROTOCOL_IB,
e126ba97
EC
7217};
7218
c44ef998
IL
7219unsigned long mlx5_ib_get_xlt_emergency_page(void)
7220{
7221 mutex_lock(&xlt_emergency_page_mutex);
7222 return xlt_emergency_page;
7223}
7224
7225void mlx5_ib_put_xlt_emergency_page(void)
7226{
7227 mutex_unlock(&xlt_emergency_page_mutex);
7228}
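
/*
 * The XLT emergency page is a single pre-allocated page used as a
 * last-resort translation buffer when a regular allocation fails; the
 * mutex serializes users, so get/put must bracket the entire use. A
 * rough (hypothetical) caller sketch:
 *
 *	xlt = (void *)mlx5_ib_get_xlt_emergency_page();
 *	... fill and post the translation entries ...
 *	mlx5_ib_put_xlt_emergency_page();
 */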
7229
e126ba97
EC
7230static int __init mlx5_ib_init(void)
7231{
6aec21f6
HE
7232 int err;
7233
c44ef998
IL
7234 xlt_emergency_page = __get_free_page(GFP_KERNEL);
7235 if (!xlt_emergency_page)
7236 return -ENOMEM;
7237
7238 mutex_init(&xlt_emergency_page_mutex);
7239
d69a24e0 7240 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
c44ef998
IL
7241 if (!mlx5_ib_event_wq) {
7242 free_page(xlt_emergency_page);
d69a24e0 7243 return -ENOMEM;
c44ef998 7244 }
d69a24e0 7245
81713d37 7246 mlx5_ib_odp_init();
9603b61d 7247
6aec21f6 7248 err = mlx5_register_interface(&mlx5_ib_interface);
6aec21f6 7249
6aec21f6 7250 return err;
e126ba97
EC
7251}
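
/*
 * mlx5_ib_cleanup() below undoes this in exact reverse order:
 * unregister the interface, destroy the event workqueue, then release
 * the XLT emergency page and its mutex.
 */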
7252
7253static void __exit mlx5_ib_cleanup(void)
7254{
9603b61d 7255 mlx5_unregister_interface(&mlx5_ib_interface);
d69a24e0 7256 destroy_workqueue(mlx5_ib_event_wq);
c44ef998
IL
7257 mutex_destroy(&xlt_emergency_page_mutex);
7258 free_page(xlt_emergency_page);
e126ba97
EC
7259}
7260
7261module_init(mlx5_ib_init);
7262module_exit(mlx5_ib_cleanup);