/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* To support BlueFlame (BF), a QP must have bits 6 and 7 of its QP number
 * cleared.
 */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40

void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}

static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
			    struct mlx4_qp_context *context,
			    enum mlx4_qp_optpar optpar,
			    int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	if ((cur_state == MLX4_QP_STATE_RTR) &&
	    (new_state == MLX4_QP_STATE_RTS) &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		context->roce_entropy =
			cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof(*context));

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
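
/*
 * Illustrative sketch only, not part of the driver: a caller-side view of a
 * single RST->INIT transition through mlx4_qp_modify(). The my_qp_to_init()
 * name is hypothetical, and a real caller (e.g. mlx4_ib) fills in many more
 * context fields (PD, CQ numbers, MTU, ...) before each transition.
 */
#if 0
static int my_qp_to_init(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			 struct mlx4_qp *qp)
{
	struct mlx4_qp_context context;

	memset(&context, 0, sizeof(context));
	/* The QP state lives in the top four bits of context.flags */
	context.flags = cpu_to_be32(MLX4_QP_STATE_INIT << 28);
	return mlx4_qp_modify(dev, mtt, MLX4_QP_STATE_RST,
			      MLX4_QP_STATE_INIT, &context, 0, 0, qp);
}
#endif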

int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else {
		__mlx4_qp_release_range(dev, base_qpn, cnt);
	}
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
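
/*
 * Illustrative sketch only, not part of the driver: the usual pairing of
 * mlx4_qp_reserve_range() and mlx4_qp_release_range(). The function name
 * and the count/alignment of 8 are hypothetical; the flags argument can
 * additionally request A0/BlueFlame-friendly QP numbers, as handled above.
 */
#if 0
static int my_reserve_and_release(struct mlx4_dev *dev)
{
	int base_qpn;
	int err;

	/* Reserve 8 QP numbers, naturally aligned to a multiple of 8 */
	err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn, 0);
	if (err)
		return err;

	/* ... use QP numbers base_qpn .. base_qpn + 7 ... */

	mlx4_qp_release_range(dev, base_qpn, 8);
	return 0;
}
#endif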

int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else {
		__mlx4_qp_free_icm(dev, qpn);
	}
}

struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);

	spin_unlock(&qp_table->lock);
	return qp;
}

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params =
			cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
			mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
			err = -EOPNOTSUPP;
			goto out;
		}

		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);
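
/*
 * Illustrative sketch only, not part of the driver: enabling VLAN stripping
 * (VSD) on an existing QP via mlx4_update_qp(). The my_enable_vlan_strip()
 * name is hypothetical; the attr bit and params->flags value correspond to
 * the MLX4_UPDATE_QP_VSD handling above.
 */
#if 0
static int my_enable_vlan_strip(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_update_qp_params params = {
		.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE,
	};

	return mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
}
#endif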

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
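
/*
 * Illustrative sketch only, not part of the driver: the full software
 * lifecycle of a QP as exposed by this file -- reserve a QP number, attach
 * ICM and insert into the radix tree with mlx4_qp_alloc(), then tear down
 * in reverse order. The my_qp_lifecycle() name is hypothetical.
 */
#if 0
static int my_qp_lifecycle(struct mlx4_dev *dev)
{
	struct mlx4_qp qp;
	int base_qpn;
	int err;

	err = mlx4_qp_reserve_range(dev, 1, 1, &base_qpn, 0);
	if (err)
		return err;

	err = mlx4_qp_alloc(dev, base_qpn, &qp);
	if (err)
		goto release;

	/* ... bring the QP to RTS and use it (see mlx4_qp_to_ready) ... */

	mlx4_qp_remove(dev, &qp);	/* stop async event dispatch */
	mlx4_qp_free(dev, &qp);		/* waits for the refcount to drop */
release:
	mlx4_qp_release_range(dev, base_qpn, 1);
	return err;
}
#endif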

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256

static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
	if (!qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);
	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);
	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);
	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);
	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/* We have a single zone for the A0 steering QPs area of the FW. This area
	 * needs to be split into subareas. One set of subareas is for RSS QPs
	 * (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 * Currently, the values returned by the FW (A0 steering area starting qp number
	 * and A0 steering area size) are such that there are only two subareas -- one
	 * for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < ARRAY_SIZE(*bitmap); k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK))) {
			size = requested_size;
		} else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/* We will not take this path if last_offset was
					 * already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of "size"
			 * QPs in which both bits 6 and 7 are zero, because we pass it
			 * MLX4_BF_QP_SKIP_MASK as the skip mask.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qp_table->zones_uids); i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (!bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs. The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra for bitmaps that we don't have
	 * enough space for.
	 */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);
	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
		 * since the PF does not call mlx4_slave_caps */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof(*context));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
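
/*
 * Illustrative sketch only, not part of the driver: reading back the current
 * state of a QP with mlx4_qp_query(). As in mlx4_qp_to_ready() below, the
 * state is kept in the top four bits of context.flags. The
 * my_query_qp_state() name is hypothetical.
 */
#if 0
static enum mlx4_qp_state my_query_qp_state(struct mlx4_dev *dev,
					    struct mlx4_qp *qp)
{
	struct mlx4_qp_context context;
	int err;

	err = mlx4_qp_query(dev, qp, &context);
	if (err)
		return MLX4_QP_STATE_ERR;

	return (enum mlx4_qp_state)(be32_to_cpu(context.flags) >> 28);
}
#endif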

int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= cpu_to_be32(~MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
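
/*
 * Illustrative sketch only, not part of the driver: driving a freshly
 * allocated QP from RST to RTS in one call. mlx4_qp_to_ready() walks the
 * RST->INIT->RTR->RTS ladder above, so the caller prepares the context once.
 * The my_activate_qp() name is hypothetical.
 */
#if 0
static int my_activate_qp(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			  struct mlx4_qp_context *context, struct mlx4_qp *qp)
{
	enum mlx4_qp_state qp_state = MLX4_QP_STATE_RST;

	/* On success, qp_state is left holding MLX4_QP_STATE_RTS */
	return mlx4_qp_to_ready(dev, mtt, context, qp, &qp_state);
}
#endif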

u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}