drivers/infiniband/hw/mlx5/srq_cmd.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_ib.h"
#include "srq.h"

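/*
 * Compute the size in bytes of the physical address (PAS) array appended
 * to a create command.  The queue spans 1 << (log_size + 4 + wqe_shift)
 * bytes (a 16-byte base stride shifted by wqe_shift), offset by
 * page_offset, which is counted in quanta of page_size / 64.  The total
 * is rounded up to whole pages, one 64-bit address per page.
 *
 * Illustrative numbers (not from the source): a log_page_size field of 0
 * means 4 KB pages (0 + 12); with log_size = 8 and wqe_shift = 2 the
 * queue is 1 << 14 = 16 KB, i.e. 4 pages, so 4 * sizeof(u64) = 32 bytes
 * of PAS.
 */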
static int get_pas_size(struct mlx5_srq_attr *in)
{
        u32 log_page_size = in->log_page_size + 12;
        u32 log_srq_size  = in->log_size;
        u32 log_rq_stride = in->wqe_shift;
        u32 page_offset   = in->page_offset;
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

        return rq_num_pas * sizeof(u64);
}

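/*
 * set_wq()/set_srqc() translate a struct mlx5_srq_attr into the firmware
 * WQ and SRQ context layouts via MLX5_SET(); get_wq()/get_srqc() do the
 * reverse when a context is read back.  Note the stride bias: the WQ
 * layout stores log_wq_stride as wqe_shift + 4, while the SRQ context
 * stores wqe_shift directly in log_rq_stride.
 */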
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
        MLX5_SET(wq, wq, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
        MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
        MLX5_SET(wq, wq, log_wq_sz, in->log_size);
        MLX5_SET(wq, wq, page_offset, in->page_offset);
        MLX5_SET(wq, wq, lwm, in->lwm);
        MLX5_SET(wq, wq, pd, in->pd);
        MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        MLX5_SET(srqc, srqc, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
        MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
        MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
        MLX5_SET(srqc, srqc, page_offset, in->page_offset);
        MLX5_SET(srqc, srqc, lwm, in->lwm);
        MLX5_SET(srqc, srqc, pd, in->pd);
        MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
        MLX5_SET(srqc, srqc, xrcd, in->xrcd);
        MLX5_SET(srqc, srqc, cqn, in->cqn);
}

static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(wq, wq, wq_signature))
                in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
        in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
        in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
        in->log_size = MLX5_GET(wq, wq, log_wq_sz);
        in->page_offset = MLX5_GET(wq, wq, page_offset);
        in->lwm = MLX5_GET(wq, wq, lwm);
        in->pd = MLX5_GET(wq, wq, pd);
        in->db_record = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(srqc, srqc, wq_signature))
                in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
        in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
        in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
        in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
        in->page_offset = MLX5_GET(srqc, srqc, page_offset);
        in->lwm = MLX5_GET(srqc, srqc, lwm);
        in->pd = MLX5_GET(srqc, srqc, pd);
        in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}

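/*
 * Look up an SRQ by number, taking a reference under the table lock.
 * The caller is expected to drop the reference via mlx5_core_res_put()
 * when it is done with the SRQ.
 */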
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
        struct mlx5_srq_table *table = &dev->srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->common.refcount);

        spin_unlock(&table->lock);

        return srq;
}

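/*
 * The create commands share one pattern: a fixed-size command layout is
 * followed by the variable-length PAS array, so the input buffer is sized
 * as MLX5_ST_SZ_BYTES(...) + get_pas_size(in) and allocated with
 * kvzalloc().  On success the firmware-assigned queue number and owning
 * uid are recorded in the mlx5_core_srq.
 */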
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
        void *create_in;
        void *srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        MLX5_SET(create_srq_in, create_in, uid, in->uid);
        srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
        pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

        set_srqc(srqc, in);
        memcpy(pas, in->pas, pas_size);

        MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);

        err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                            sizeof(create_out));
        kvfree(create_in);
        if (!err) {
                srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
                srq->uid = in->uid;
        }

        return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
        u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

        MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
        MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
        MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);

        return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
                             sizeof(srq_out));
}

static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm, int is_srq)
{
        u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};

        MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
        MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
        MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
        MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
        MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);

        return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
                             sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
        u32 *srq_out;
        void *srqc;
        int err;

        srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
        if (!srq_out)
                return -ENOMEM;

        MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
        MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
        err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
                            MLX5_ST_SZ_BYTES(query_srq_out));
        if (err)
                goto out;

        srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
        get_srqc(srqc, out);
        if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;
out:
        kvfree(srq_out);
        return err;
}

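/*
 * XRC SRQ variants of the four SRQ verbs above.  They differ mainly in
 * opcode and layout names; the context is still built and parsed with
 * set_srqc()/get_srqc(), with the addition of user_index at create time.
 */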
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
                              struct mlx5_core_srq *srq,
                              struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
        void *create_in;
        void *xrc_srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
        xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
                                xrc_srq_context_entry);
        pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

        set_srqc(xrc_srqc, in);
        MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
        memcpy(pas, in->pas, pas_size);
        MLX5_SET(create_xrc_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_XRC_SRQ);

        memset(create_out, 0, sizeof(create_out));
        err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                            sizeof(create_out));
        if (err)
                goto out;

        srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
        srq->uid = in->uid;
out:
        kvfree(create_in);
        return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
                               struct mlx5_core_srq *srq)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_DESTROY_XRC_SRQ);
        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);

        return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
                             xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                           u16 lwm)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
                 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);

        return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
                             xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
                             struct mlx5_core_srq *srq,
                             struct mlx5_srq_attr *out)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
        u32 *xrcsrq_out;
        void *xrc_srqc;
        int err;

        xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
        if (!xrcsrq_out)
                return -ENOMEM;
        memset(xrcsrq_in, 0, sizeof(xrcsrq_in));

        MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_QUERY_XRC_SRQ);
        MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
                            xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (err)
                goto out;

        xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
                                xrc_srq_context_entry);
        get_srqc(xrc_srqc, out);
        if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(xrcsrq_out);
        return err;
}

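/*
 * RMP (receive memory pool) variants.  An RMP carries a full WQ context
 * rather than an SRQ context, so these helpers go through set_wq() and
 * get_wq().  The modify and query layouts are comparatively large, which
 * is presumably why both command buffers are kvzalloc()ed here instead of
 * living on the stack.
 */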
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        void *create_out = NULL;
        void *create_in = NULL;
        void *rmpc;
        void *wq;
        int pas_size;
        int outlen;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
        outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
        create_in = kvzalloc(inlen, GFP_KERNEL);
        create_out = kvzalloc(outlen, GFP_KERNEL);
        if (!create_in || !create_out) {
                err = -ENOMEM;
                goto out;
        }

        rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(create_rmp_in, create_in, uid, in->uid);
        set_wq(wq, in);
        memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

        MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
        err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
        if (!err) {
                srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
                srq->uid = in->uid;
        }

out:
        kvfree(create_in);
        kvfree(create_out);
        return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
        u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
        u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};

        MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
        MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
        MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm)
{
        void *out = NULL;
        void *in = NULL;
        void *rmpc;
        void *wq;
        void *bitmask;
        int outlen;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
        outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

        in = kvzalloc(inlen, GFP_KERNEL);
        out = kvzalloc(outlen, GFP_KERNEL);
        if (!in || !out) {
                err = -ENOMEM;
                goto out;
        }

        rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
        bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
        MLX5_SET(modify_rmp_in, in, uid, srq->uid);
        MLX5_SET(wq, wq, lwm, lwm);
        MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);

        err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);

out:
        kvfree(in);
        kvfree(out);
        return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 *rmp_out = NULL;
        u32 *rmp_in = NULL;
        void *rmpc;
        int outlen;
        int inlen;
        int err;

        outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
        inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

        rmp_out = kvzalloc(outlen, GFP_KERNEL);
        rmp_in = kvzalloc(inlen, GFP_KERNEL);
        if (!rmp_out || !rmp_in) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
        MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
        err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
        if (err)
                goto out;

        rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
        get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
        if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(rmp_out);
        kvfree(rmp_in);
        return err;
}

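/*
 * XRQ variants.  An XRQ carries a WQ context and optionally hardware tag
 * matching: for IB_SRQT_TM the topology, the rendezvous offload flag and
 * the matching-list size are programmed at create time, and the query
 * path reports the tag-matching phase counters back in the attributes.
 */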
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
        void *create_in;
        void *xrqc;
        void *wq;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
        wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

        set_wq(wq, in);
        memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

        if (in->type == IB_SRQT_TM) {
                MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
                if (in->flags & MLX5_SRQ_FLAG_RNDV)
                        MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
                MLX5_SET(xrqc, xrqc,
                         tag_matching_topology_context.log_matching_list_sz,
                         in->tm_log_list_size);
        }
        MLX5_SET(xrqc, xrqc, user_index, in->user_index);
        MLX5_SET(xrqc, xrqc, cqn, in->cqn);
        MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
        MLX5_SET(create_xrq_in, create_in, uid, in->uid);
        err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
                            sizeof(create_out));
        kvfree(create_in);
        if (!err) {
                srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
                srq->uid = in->uid;
        }

        return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
        u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};

        MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
        MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
        MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
        u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};

        MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
        MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
        MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
        MLX5_SET(arm_rq_in, in, lwm, lwm);
        MLX5_SET(arm_rq_in, in, uid, srq->uid);

        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
        u32 *xrq_out;
        int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
        void *xrqc;
        int err;

        xrq_out = kvzalloc(outlen, GFP_KERNEL);
        if (!xrq_out)
                return -ENOMEM;

        MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
        MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

        err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
        if (err)
                goto out;

        xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
        get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
        if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;
        out->tm_next_tag =
                MLX5_GET(xrqc, xrqc,
                         tag_matching_topology_context.append_next_index);
        out->tm_hw_phase_cnt =
                MLX5_GET(xrqc, xrqc,
                         tag_matching_topology_context.hw_phase_cnt);
        out->tm_sw_phase_cnt =
                MLX5_GET(xrqc, xrqc,
                         tag_matching_topology_context.sw_phase_cnt);

out:
        kvfree(xrq_out);
        return err;
}

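/*
 * Dispatch helpers: with an ISSI (interface step sequence ID) of 0 the
 * firmware exposes native SRQ commands; on newer interfaces the SRQ is
 * emulated by an XRC SRQ, XRQ or RMP object, selected by srq->common.res.
 */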
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                            struct mlx5_srq_attr *in)
{
        if (!dev->mdev->issi)
                return create_srq_cmd(dev, srq, in);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return create_xrc_srq_cmd(dev, srq, in);
        case MLX5_RES_XRQ:
                return create_xrq_cmd(dev, srq, in);
        default:
                return create_rmp_cmd(dev, srq, in);
        }
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
        if (!dev->mdev->issi)
                return destroy_srq_cmd(dev, srq);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return destroy_xrc_srq_cmd(dev, srq);
        case MLX5_RES_XRQ:
                return destroy_xrq_cmd(dev, srq);
        default:
                return destroy_rmp_cmd(dev, srq);
        }
}

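/*
 * Public entry point: choose the backing resource type from the verbs SRQ
 * type, create the hardware object, then publish the SRQ in the radix
 * tree with an initial reference so events and lookups can find it.  On a
 * failed tree insert the hardware object is destroyed again.
 */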
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *in)
{
        struct mlx5_srq_table *table = &dev->srq_table;
        int err;

        switch (in->type) {
        case IB_SRQT_XRC:
                srq->common.res = MLX5_RES_XSRQ;
                break;
        case IB_SRQT_TM:
                srq->common.res = MLX5_RES_XRQ;
                break;
        default:
                srq->common.res = MLX5_RES_SRQ;
        }

        err = create_srq_split(dev, srq, in);
        if (err)
                return err;

        atomic_set(&srq->common.refcount, 1);
        init_completion(&srq->common.free);

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, srq->srqn, srq);
        spin_unlock_irq(&table->lock);
        if (err)
                goto err_destroy_srq_split;

        return 0;

err_destroy_srq_split:
        destroy_srq_split(dev, srq);

        return err;
}

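/*
 * Destroy path: the SRQ is removed from the radix tree first so no new
 * lookup can find it, then the hardware object is destroyed, and finally
 * the initial reference is dropped and we wait for any remaining holders
 * (such as the event notifier) to finish.
 */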
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
        struct mlx5_srq_table *table = &dev->srq_table;
        struct mlx5_core_srq *tmp;
        int err;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, srq->srqn);
        spin_unlock_irq(&table->lock);
        if (!tmp || tmp != srq)
                return -EINVAL;

        err = destroy_srq_split(dev, srq);
        if (err)
                return err;

        mlx5_core_res_put(&srq->common);
        wait_for_completion(&srq->common.free);

        return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                       struct mlx5_srq_attr *out)
{
        if (!dev->mdev->issi)
                return query_srq_cmd(dev, srq, out);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return query_xrc_srq_cmd(dev, srq, out);
        case MLX5_RES_XRQ:
                return query_xrq_cmd(dev, srq, out);
        default:
                return query_rmp_cmd(dev, srq, out);
        }
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                     u16 lwm, int is_srq)
{
        if (!dev->mdev->issi)
                return arm_srq_cmd(dev, srq, lwm, is_srq);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return arm_xrc_srq_cmd(dev, srq, lwm);
        case MLX5_RES_XRQ:
                return arm_xrq_cmd(dev, srq, lwm);
        default:
                return arm_rmp_cmd(dev, srq, lwm);
        }
}

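/*
 * Notifier for SRQ hardware events.  Only SRQ catastrophic-error and
 * RQ-limit events are of interest; the SRQ number is taken from the low
 * 24 bits of the EQE, the SRQ is looked up with a reference held, its
 * event callback is invoked, and the reference is dropped.
 */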
static int srq_event_notifier(struct notifier_block *nb,
                              unsigned long type, void *data)
{
        struct mlx5_srq_table *table;
        struct mlx5_core_srq *srq;
        struct mlx5_eqe *eqe;
        u32 srqn;

        if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
            type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
                return NOTIFY_DONE;

        table = container_of(nb, struct mlx5_srq_table, nb);

        eqe = data;
        srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->common.refcount);

        spin_unlock(&table->lock);

        if (!srq)
                return NOTIFY_OK;

        srq->event(srq, eqe->type);

        mlx5_core_res_put(&srq->common);

        return NOTIFY_OK;
}

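/*
 * Table setup and teardown.  Initialization wires up the lock, the radix
 * tree and the event notifier; cleanup only unregisters the notifier, on
 * the assumption that every SRQ has already been destroyed by this point.
 */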
int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
        struct mlx5_srq_table *table = &dev->srq_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

        table->nb.notifier_call = srq_event_notifier;
        mlx5_notifier_register(dev->mdev, &table->nb);

        return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
        struct mlx5_srq_table *table = &dev->srq_table;

        mlx5_notifier_unregister(dev->mdev, &table->nb);
}