drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID (1ull << 63)
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 int ref_count;
56 u8 smac_index;
57 u8 port;
58};
59
60struct vlan_res {
61 struct list_head list;
62 u16 vlan;
63 int ref_count;
64 int vlan_index;
65 u8 port;
66};
67
68struct res_common {
69 struct list_head list;
70 struct rb_node node;
71 u64 res_id;
72 int owner;
73 int state;
74 int from_state;
75 int to_state;
76 int removing;
77};
78
79enum {
80 RES_ANY_BUSY = 1
81};
82
83struct res_gid {
84 struct list_head list;
85 u8 gid[16];
86 enum mlx4_protocol prot;
9f5b6c63 87 enum mlx4_steer_type steer;
fab1e24a 88 u64 reg_id;
89};
90
91enum res_qp_states {
92 RES_QP_BUSY = RES_ANY_BUSY,
93
94 /* QP number was allocated */
95 RES_QP_RESERVED,
96
97 /* ICM memory for QP context was mapped */
98 RES_QP_MAPPED,
99
100 /* QP is in hw ownership */
101 RES_QP_HW
102};
103
104struct res_qp {
105 struct res_common com;
106 struct res_mtt *mtt;
107 struct res_cq *rcq;
108 struct res_cq *scq;
109 struct res_srq *srq;
110 struct list_head mcg_list;
111 spinlock_t mcg_spl;
112 int local_qpn;
2c473ae7 113 atomic_t ref_count;
b01978ca 114 u32 qpc_flags;
f0f829bf 115 /* saved qp params before VST enforcement in order to restore on VGT */
b01978ca 116 u8 sched_queue;
117 __be32 param3;
118 u8 vlan_control;
119 u8 fvl_rx;
120 u8 pri_path_fl;
121 u8 vlan_index;
122 u8 feup;
123};
124
125enum res_mtt_states {
126 RES_MTT_BUSY = RES_ANY_BUSY,
127 RES_MTT_ALLOCATED,
128};
129
130static inline const char *mtt_states_str(enum res_mtt_states state)
131{
132 switch (state) {
133 case RES_MTT_BUSY: return "RES_MTT_BUSY";
134 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135 default: return "Unknown";
136 }
137}
138
139struct res_mtt {
140 struct res_common com;
141 int order;
142 atomic_t ref_count;
143};
144
145enum res_mpt_states {
146 RES_MPT_BUSY = RES_ANY_BUSY,
147 RES_MPT_RESERVED,
148 RES_MPT_MAPPED,
149 RES_MPT_HW,
150};
151
152struct res_mpt {
153 struct res_common com;
154 struct res_mtt *mtt;
155 int key;
156};
157
158enum res_eq_states {
159 RES_EQ_BUSY = RES_ANY_BUSY,
160 RES_EQ_RESERVED,
161 RES_EQ_HW,
162};
163
164struct res_eq {
165 struct res_common com;
166 struct res_mtt *mtt;
167};
168
169enum res_cq_states {
170 RES_CQ_BUSY = RES_ANY_BUSY,
171 RES_CQ_ALLOCATED,
172 RES_CQ_HW,
173};
174
175struct res_cq {
176 struct res_common com;
177 struct res_mtt *mtt;
178 atomic_t ref_count;
179};
180
181enum res_srq_states {
182 RES_SRQ_BUSY = RES_ANY_BUSY,
183 RES_SRQ_ALLOCATED,
184 RES_SRQ_HW,
185};
186
187struct res_srq {
188 struct res_common com;
189 struct res_mtt *mtt;
190 struct res_cq *cq;
191 atomic_t ref_count;
192};
193
194enum res_counter_states {
195 RES_COUNTER_BUSY = RES_ANY_BUSY,
196 RES_COUNTER_ALLOCATED,
197};
198
199struct res_counter {
200 struct res_common com;
201 int port;
202};
203
204enum res_xrcdn_states {
205 RES_XRCD_BUSY = RES_ANY_BUSY,
206 RES_XRCD_ALLOCATED,
207};
208
209struct res_xrcdn {
210 struct res_common com;
211 int port;
212};
213
214enum res_fs_rule_states {
215 RES_FS_RULE_BUSY = RES_ANY_BUSY,
216 RES_FS_RULE_ALLOCATED,
217};
218
219struct res_fs_rule {
220 struct res_common com;
2c473ae7 221 int qpn;
222};
223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226 return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
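/* Tracked resources are kept in one red-black tree per resource type
 * (res_tracker.res_tree[type]), keyed by res_common.res_id.
 * res_tracker_lookup() is a plain binary search over that tree, and
 * res_tracker_insert() below links a new node, failing with -EEXIST on a
 * duplicate id.
 */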
229static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
230{
231 struct rb_node *node = root->rb_node;
232
233 while (node) {
234 struct res_common *res = container_of(node, struct res_common,
235 node);
236
237 if (res_id < res->res_id)
238 node = node->rb_left;
239 else if (res_id > res->res_id)
240 node = node->rb_right;
241 else
242 return res;
243 }
244 return NULL;
245}
246
247static int res_tracker_insert(struct rb_root *root, struct res_common *res)
248{
249 struct rb_node **new = &(root->rb_node), *parent = NULL;
250
251 /* Figure out where to put new node */
252 while (*new) {
253 struct res_common *this = container_of(*new, struct res_common,
254 node);
255
256 parent = *new;
257 if (res->res_id < this->res_id)
258 new = &((*new)->rb_left);
259 else if (res->res_id > this->res_id)
260 new = &((*new)->rb_right);
261 else
262 return -EEXIST;
263 }
264
265 /* Add new node and rebalance tree. */
266 rb_link_node(&res->node, parent, new);
267 rb_insert_color(&res->node, root);
268
269 return 0;
270}
271
272enum qp_transition {
273 QP_TRANS_INIT2RTR,
274 QP_TRANS_RTR2RTS,
275 QP_TRANS_RTS2RTS,
276 QP_TRANS_SQERR2RTS,
277 QP_TRANS_SQD2SQD,
278 QP_TRANS_SQD2RTS
279};
280
c82e9aa0 281/* For Debug uses */
95646373 282static const char *resource_str(enum mlx4_resource rt)
283{
284 switch (rt) {
285 case RES_QP: return "RES_QP";
286 case RES_CQ: return "RES_CQ";
287 case RES_SRQ: return "RES_SRQ";
288 case RES_MPT: return "RES_MPT";
289 case RES_MTT: return "RES_MTT";
290 case RES_MAC: return "RES_MAC";
4874080d 291 case RES_VLAN: return "RES_VLAN";
292 case RES_EQ: return "RES_EQ";
293 case RES_COUNTER: return "RES_COUNTER";
1b9c6b06 294 case RES_FS_RULE: return "RES_FS_RULE";
ba062d52 295 case RES_XRCD: return "RES_XRCD";
296 default: return "Unknown resource type !!!";
297 };
298}
299
4874080d 300static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
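/* Per-function admission control for resource allocation.  Every slave has
 * a quota (hard cap), a guaranteed share (carved out of the reserved pool)
 * and otherwise competes for the shared free pool.  A request is granted
 * only if it stays within the quota and either fits in the guaranteed
 * share or still leaves the reserved pool untouched.  The usual call
 * pattern, as in cq_alloc_res() later in this file, is:
 *
 *	err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
 *	if (err)
 *		return err;
 *	err = __mlx4_cq_alloc_icm(dev, &cqn);
 *	if (err)
 *		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
 */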
301static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
302 enum mlx4_resource res_type, int count,
303 int port)
304{
305 struct mlx4_priv *priv = mlx4_priv(dev);
306 struct resource_allocator *res_alloc =
307 &priv->mfunc.master.res_tracker.res_alloc[res_type];
308 int err = -EINVAL;
309 int allocated, free, reserved, guaranteed, from_free;
95646373 310 int from_rsvd;
311
312 if (slave > dev->num_vfs)
313 return -EINVAL;
314
315 spin_lock(&res_alloc->alloc_lock);
316 allocated = (port > 0) ?
317 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
318 res_alloc->allocated[slave];
319 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
320 res_alloc->res_free;
321 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
322 res_alloc->res_reserved;
323 guaranteed = res_alloc->guaranteed[slave];
324
325 if (allocated + count > res_alloc->quota[slave]) {
326 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
327 slave, port, resource_str(res_type), count,
328 allocated, res_alloc->quota[slave]);
146f3ef4 329 goto out;
95646373 330 }
331
332 if (allocated + count <= guaranteed) {
333 err = 0;
95646373 334 from_rsvd = count;
335 } else {
336 /* portion may need to be obtained from free area */
337 if (guaranteed - allocated > 0)
338 from_free = count - (guaranteed - allocated);
339 else
340 from_free = count;
341
342 from_rsvd = count - from_free;
343
344 if (free - from_free >= reserved)
146f3ef4 345 err = 0;
346 else
347 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
348 slave, port, resource_str(res_type), free,
349 from_free, reserved);
350 }
351
352 if (!err) {
353 /* grant the request */
354 if (port > 0) {
355 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
356 res_alloc->res_port_free[port - 1] -= count;
95646373 357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358 } else {
359 res_alloc->allocated[slave] += count;
360 res_alloc->res_free -= count;
95646373 361 res_alloc->res_reserved -= from_rsvd;
362 }
363 }
364
365out:
366 spin_unlock(&res_alloc->alloc_lock);
367 return err;
368}
369
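/* Return @count instances to the allocator: any portion that dips back
 * into the slave's guaranteed share is credited to the reserved pool
 * (from_rsvd), the remainder to the shared free pool.
 */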
370static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
371 enum mlx4_resource res_type, int count,
372 int port)
373{
374 struct mlx4_priv *priv = mlx4_priv(dev);
375 struct resource_allocator *res_alloc =
376 &priv->mfunc.master.res_tracker.res_alloc[res_type];
95646373 377 int allocated, guaranteed, from_rsvd;
378
379 if (slave > dev->num_vfs)
380 return;
381
382 spin_lock(&res_alloc->alloc_lock);
383
384 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
386 res_alloc->allocated[slave];
387 guaranteed = res_alloc->guaranteed[slave];
388
389 if (allocated - count >= guaranteed) {
390 from_rsvd = 0;
391 } else {
392 /* portion may need to be returned to reserved area */
393 if (allocated - guaranteed > 0)
394 from_rsvd = count - (allocated - guaranteed);
395 else
396 from_rsvd = count;
397 }
398
399 if (port > 0) {
400 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
401 res_alloc->res_port_free[port - 1] += count;
95646373 402 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
403 } else {
404 res_alloc->allocated[slave] -= count;
405 res_alloc->res_free += count;
95646373 406 res_alloc->res_reserved += from_rsvd;
407 }
408
409 spin_unlock(&res_alloc->alloc_lock);
410 return;
411}
412
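/* Default quota policy: every function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances, and its quota is half of
 * the total plus that guarantee.  For the PF the free pool is primed with
 * the full instance count; for RES_MTT the firmware-reserved MTTs are also
 * added to the PF's guarantee and quota, since they come out of the PF's
 * own allocation.
 */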
413static inline void initialize_res_quotas(struct mlx4_dev *dev,
414 struct resource_allocator *res_alloc,
415 enum mlx4_resource res_type,
416 int vf, int num_instances)
417{
418 res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
419 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420 if (vf == mlx4_master_func_num(dev)) {
421 res_alloc->res_free = num_instances;
422 if (res_type == RES_MTT) {
423 /* reserved mtts will be taken out of the PF allocation */
424 res_alloc->res_free += dev->caps.reserved_mtts;
425 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
426 res_alloc->quota[vf] += dev->caps.reserved_mtts;
427 }
428 }
429}
430
431void mlx4_init_quotas(struct mlx4_dev *dev)
432{
433 struct mlx4_priv *priv = mlx4_priv(dev);
434 int pf;
435
436 /* quotas for VFs are initialized in mlx4_slave_cap */
437 if (mlx4_is_slave(dev))
438 return;
439
440 if (!mlx4_is_mfunc(dev)) {
441 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
442 mlx4_num_reserved_sqps(dev);
443 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
444 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
445 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
446 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
447 return;
448 }
449
450 pf = mlx4_master_func_num(dev);
451 dev->quotas.qp =
452 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
453 dev->quotas.cq =
454 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
455 dev->quotas.srq =
456 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
457 dev->quotas.mtt =
458 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
459 dev->quotas.mpt =
460 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
461}
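/* Build the master's view of slave-owned resources: a per-slave list head
 * for every resource type, one rb-tree root per type, and a
 * resource_allocator per type holding the quota/guaranteed/allocated
 * counters plus the free and reserved pools (kept per port for MAC and
 * VLAN resources).
 */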
462int mlx4_init_resource_tracker(struct mlx4_dev *dev)
463{
464 struct mlx4_priv *priv = mlx4_priv(dev);
5a0d0a61 465 int i, j;
466 int t;
467
468 priv->mfunc.master.res_tracker.slave_list =
469 kzalloc(dev->num_slaves * sizeof(struct slave_list),
470 GFP_KERNEL);
471 if (!priv->mfunc.master.res_tracker.slave_list)
472 return -ENOMEM;
473
474 for (i = 0 ; i < dev->num_slaves; i++) {
475 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
476 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
477 slave_list[i].res_list[t]);
478 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
479 }
480
481 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
482 dev->num_slaves);
483 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
4af1c048 484 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
c82e9aa0 485
486 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487 struct resource_allocator *res_alloc =
488 &priv->mfunc.master.res_tracker.res_alloc[i];
489 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
490 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
491 if (i == RES_MAC || i == RES_VLAN)
492 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
493 (dev->num_vfs + 1) * sizeof(int),
494 GFP_KERNEL);
495 else
496 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
497
498 if (!res_alloc->quota || !res_alloc->guaranteed ||
499 !res_alloc->allocated)
500 goto no_mem_err;
501
146f3ef4 502 spin_lock_init(&res_alloc->alloc_lock);
5a0d0a61 503 for (t = 0; t < dev->num_vfs + 1; t++) {
504 struct mlx4_active_ports actv_ports =
505 mlx4_get_active_ports(dev, t);
506 switch (i) {
507 case RES_QP:
508 initialize_res_quotas(dev, res_alloc, RES_QP,
509 t, dev->caps.num_qps -
510 dev->caps.reserved_qps -
511 mlx4_num_reserved_sqps(dev));
512 break;
513 case RES_CQ:
514 initialize_res_quotas(dev, res_alloc, RES_CQ,
515 t, dev->caps.num_cqs -
516 dev->caps.reserved_cqs);
517 break;
518 case RES_SRQ:
519 initialize_res_quotas(dev, res_alloc, RES_SRQ,
520 t, dev->caps.num_srqs -
521 dev->caps.reserved_srqs);
522 break;
523 case RES_MPT:
524 initialize_res_quotas(dev, res_alloc, RES_MPT,
525 t, dev->caps.num_mpts -
526 dev->caps.reserved_mrws);
527 break;
528 case RES_MTT:
529 initialize_res_quotas(dev, res_alloc, RES_MTT,
530 t, dev->caps.num_mtts -
531 dev->caps.reserved_mtts);
532 break;
533 case RES_MAC:
534 if (t == mlx4_master_func_num(dev)) {
535 int max_vfs_pport = 0;
536 /* Calculate the max vfs per port for */
537 /* both ports. */
538 for (j = 0; j < dev->caps.num_ports;
539 j++) {
540 struct mlx4_slaves_pport slaves_pport =
541 mlx4_phys_to_slaves_pport(dev, j + 1);
542 unsigned current_slaves =
543 bitmap_weight(slaves_pport.slaves,
544 dev->caps.num_ports) - 1;
545 if (max_vfs_pport < current_slaves)
546 max_vfs_pport =
547 current_slaves;
548 }
549 res_alloc->quota[t] =
550 MLX4_MAX_MAC_NUM -
551 2 * max_vfs_pport;
552 res_alloc->guaranteed[t] = 2;
553 for (j = 0; j < MLX4_MAX_PORTS; j++)
554 res_alloc->res_port_free[j] =
555 MLX4_MAX_MAC_NUM;
556 } else {
557 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
558 res_alloc->guaranteed[t] = 2;
559 }
560 break;
561 case RES_VLAN:
562 if (t == mlx4_master_func_num(dev)) {
563 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
564 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
565 for (j = 0; j < MLX4_MAX_PORTS; j++)
566 res_alloc->res_port_free[j] =
567 res_alloc->quota[t];
568 } else {
569 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
570 res_alloc->guaranteed[t] = 0;
571 }
572 break;
573 case RES_COUNTER:
574 res_alloc->quota[t] = dev->caps.max_counters;
575 res_alloc->guaranteed[t] = 0;
576 if (t == mlx4_master_func_num(dev))
577 res_alloc->res_free = res_alloc->quota[t];
578 break;
579 default:
580 break;
581 }
582 if (i == RES_MAC || i == RES_VLAN) {
583 for (j = 0; j < dev->caps.num_ports; j++)
584 if (test_bit(j, actv_ports.ports))
585 res_alloc->res_port_rsvd[j] +=
586 res_alloc->guaranteed[t];
587 } else {
588 res_alloc->res_reserved += res_alloc->guaranteed[t];
589 }
590 }
591 }
c82e9aa0 592 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
593 return 0;
594
595no_mem_err:
596 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
597 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
598 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
599 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
600 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
601 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
602 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
603 }
604 return -ENOMEM;
605}
606
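/* Tear-down is staged: RES_TR_FREE_ALL and RES_TR_FREE_SLAVES_ONLY drop
 * the resources still held by slaves (and the master's own VLANs and RoCE
 * GIDs), while anything other than RES_TR_FREE_SLAVES_ONLY also frees the
 * per-type allocator arrays and the slave_list itself.
 */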
607void mlx4_free_resource_tracker(struct mlx4_dev *dev,
608 enum mlx4_res_tracker_free_type type)
609{
610 struct mlx4_priv *priv = mlx4_priv(dev);
611 int i;
612
613 if (priv->mfunc.master.res_tracker.slave_list) {
614 if (type != RES_TR_FREE_STRUCTS_ONLY) {
615 for (i = 0; i < dev->num_slaves; i++) {
616 if (type == RES_TR_FREE_ALL ||
617 dev->caps.function != i)
618 mlx4_delete_all_resources_for_slave(dev, i);
619 }
620 /* free master's vlans */
621 i = dev->caps.function;
111c6094 622 mlx4_reset_roce_gids(dev, i);
623 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
624 rem_slave_vlans(dev, i);
625 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
626 }
627
628 if (type != RES_TR_FREE_SLAVES_ONLY) {
629 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
630 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
631 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
632 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
633 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
634 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
635 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
636 }
637 kfree(priv->mfunc.master.res_tracker.slave_list);
638 priv->mfunc.master.res_tracker.slave_list = NULL;
639 }
640 }
641}
642
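/* Replace the slave's virtual P_Key index in the QPC mailbox (byte 35)
 * with the physical index from the master's virt2phys_pkey table; the
 * port is derived from bit 6 of the sched_queue byte (byte 64).
 */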
643static void update_pkey_index(struct mlx4_dev *dev, int slave,
644 struct mlx4_cmd_mailbox *inbox)
c82e9aa0 645{
646 u8 sched = *(u8 *)(inbox->buf + 64);
647 u8 orig_index = *(u8 *)(inbox->buf + 35);
648 u8 new_index;
649 struct mlx4_priv *priv = mlx4_priv(dev);
650 int port;
651
652 port = (sched >> 6 & 1) + 1;
653
654 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
655 *(u8 *)(inbox->buf + 35) = new_index;
656}
657
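/* Rewrite the GID index in a QP context so the slave stays inside its own
 * GID range: for UD QPs the index is set from the slave's base GID on
 * Ethernet ports (or from the slave number on IB), and for RC/UC/XRC the
 * primary and alternate path indices are offset by the slave's base
 * whenever the corresponding address path is being modified.
 */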
658static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
659 u8 slave)
660{
661 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
662 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
663 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
b6ffaeff 664 int port;
c82e9aa0 665
666 if (MLX4_QP_ST_UD == ts) {
667 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
668 if (mlx4_is_eth(dev, port))
669 qp_ctx->pri_path.mgid_index =
670 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
671 else
672 qp_ctx->pri_path.mgid_index = slave | 0x80;
673
674 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
675 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
676 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
677 if (mlx4_is_eth(dev, port)) {
678 qp_ctx->pri_path.mgid_index +=
679 mlx4_get_base_gid_ix(dev, slave, port);
680 qp_ctx->pri_path.mgid_index &= 0x7f;
681 } else {
682 qp_ctx->pri_path.mgid_index = slave & 0x7F;
683 }
684 }
685 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
686 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
687 if (mlx4_is_eth(dev, port)) {
688 qp_ctx->alt_path.mgid_index +=
689 mlx4_get_base_gid_ix(dev, slave, port);
690 qp_ctx->alt_path.mgid_index &= 0x7f;
691 } else {
692 qp_ctx->alt_path.mgid_index = slave & 0x7F;
693 }
694 }
54679e14 695 }
696}
697
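/* Apply the administrator's per-VF port settings to a QP context written
 * by a slave: when the VF is in VST mode (a default VLAN is configured)
 * the vlan_control, vlan_index, QoS bits and force-VLAN flags are
 * overridden, VLAN stripping is forced for UD and Raw Ethernet MLX QPs,
 * traffic is blocked entirely if the VF link is administratively down,
 * and spoof-checking forces the source MAC to the VF's registered MAC.
 */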
698static int update_vport_qp_param(struct mlx4_dev *dev,
699 struct mlx4_cmd_mailbox *inbox,
b01978ca 700 u8 slave, u32 qpn)
701{
702 struct mlx4_qp_context *qpc = inbox->buf + 8;
703 struct mlx4_vport_oper_state *vp_oper;
704 struct mlx4_priv *priv;
09e05c3f 705 u32 qp_type;
706 int port;
707
708 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
709 priv = mlx4_priv(dev);
710 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
09e05c3f 711 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
712
713 if (MLX4_VGT != vp_oper->state.default_vlan) {
714 /* the reserved QPs (special, proxy, tunnel)
715 * do not operate over vlans
716 */
717 if (mlx4_is_qp_reserved(dev, qpn))
718 return 0;
719
720 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
721 if (qp_type == MLX4_QP_ST_UD ||
722 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
723 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
724 *(__be32 *)inbox->buf =
725 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
726 MLX4_QP_OPTPAR_VLAN_STRIPPING);
727 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
728 } else {
729 struct mlx4_update_qp_params params = {.flags = 0};
730
731 mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
732 }
733 }
734
735 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
736 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
737 qpc->pri_path.vlan_control =
738 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
739 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
740 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
741 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
742 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
743 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
744 } else if (0 != vp_oper->state.default_vlan) {
745 qpc->pri_path.vlan_control =
746 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
747 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
748 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
749 } else { /* priority tagged */
750 qpc->pri_path.vlan_control =
751 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
752 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
753 }
754
755 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
3f7fb021 756 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
757 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
758 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
759 qpc->pri_path.sched_queue &= 0xC7;
760 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
3f7fb021 761 }
e6b6a231 762 if (vp_oper->state.spoofchk) {
7677fc96 763 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
e6b6a231 764 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
e6b6a231 765 }
766 return 0;
767}
768
769static int mpt_mask(struct mlx4_dev *dev)
770{
771 return dev->caps.num_mpts - 1;
772}
773
1e3f7b32 774static void *find_res(struct mlx4_dev *dev, u64 res_id,
775 enum mlx4_resource type)
776{
777 struct mlx4_priv *priv = mlx4_priv(dev);
778
779 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
780 res_id);
781}
782
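/* get_res()/put_res() bracket any use of a tracked resource: get_res()
 * verifies ownership, saves the current state in from_state and marks the
 * entry RES_ANY_BUSY (a second get_res() then fails with -EBUSY);
 * put_res() restores the saved state.
 */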
aa1ec3dd 783static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
784 enum mlx4_resource type,
785 void *res)
786{
787 struct res_common *r;
788 int err = 0;
789
790 spin_lock_irq(mlx4_tlock(dev));
791 r = find_res(dev, res_id, type);
792 if (!r) {
793 err = -ENONET;
794 goto exit;
795 }
796
797 if (r->state == RES_ANY_BUSY) {
798 err = -EBUSY;
799 goto exit;
800 }
801
802 if (r->owner != slave) {
803 err = -EPERM;
804 goto exit;
805 }
806
807 r->from_state = r->state;
808 r->state = RES_ANY_BUSY;
809
810 if (res)
811 *((struct res_common **)res) = r;
812
813exit:
814 spin_unlock_irq(mlx4_tlock(dev));
815 return err;
816}
817
818int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
819 enum mlx4_resource type,
aa1ec3dd 820 u64 res_id, int *slave)
821{
822
823 struct res_common *r;
824 int err = -ENOENT;
825 int id = res_id;
826
827 if (type == RES_QP)
828 id &= 0x7fffff;
996b0541 829 spin_lock(mlx4_tlock(dev));
830
831 r = find_res(dev, id, type);
832 if (r) {
833 *slave = r->owner;
834 err = 0;
835 }
996b0541 836 spin_unlock(mlx4_tlock(dev));
837
838 return err;
839}
840
aa1ec3dd 841static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
842 enum mlx4_resource type)
843{
844 struct res_common *r;
845
846 spin_lock_irq(mlx4_tlock(dev));
847 r = find_res(dev, res_id, type);
848 if (r)
849 r->state = r->from_state;
850 spin_unlock_irq(mlx4_tlock(dev));
851}
852
853static struct res_common *alloc_qp_tr(int id)
854{
855 struct res_qp *ret;
856
857 ret = kzalloc(sizeof *ret, GFP_KERNEL);
858 if (!ret)
859 return NULL;
860
861 ret->com.res_id = id;
862 ret->com.state = RES_QP_RESERVED;
2531188b 863 ret->local_qpn = id;
864 INIT_LIST_HEAD(&ret->mcg_list);
865 spin_lock_init(&ret->mcg_spl);
2c473ae7 866 atomic_set(&ret->ref_count, 0);
867
868 return &ret->com;
869}
870
871static struct res_common *alloc_mtt_tr(int id, int order)
872{
873 struct res_mtt *ret;
874
875 ret = kzalloc(sizeof *ret, GFP_KERNEL);
876 if (!ret)
877 return NULL;
878
879 ret->com.res_id = id;
880 ret->order = order;
881 ret->com.state = RES_MTT_ALLOCATED;
882 atomic_set(&ret->ref_count, 0);
883
884 return &ret->com;
885}
886
887static struct res_common *alloc_mpt_tr(int id, int key)
888{
889 struct res_mpt *ret;
890
891 ret = kzalloc(sizeof *ret, GFP_KERNEL);
892 if (!ret)
893 return NULL;
894
895 ret->com.res_id = id;
896 ret->com.state = RES_MPT_RESERVED;
897 ret->key = key;
898
899 return &ret->com;
900}
901
902static struct res_common *alloc_eq_tr(int id)
903{
904 struct res_eq *ret;
905
906 ret = kzalloc(sizeof *ret, GFP_KERNEL);
907 if (!ret)
908 return NULL;
909
910 ret->com.res_id = id;
911 ret->com.state = RES_EQ_RESERVED;
912
913 return &ret->com;
914}
915
916static struct res_common *alloc_cq_tr(int id)
917{
918 struct res_cq *ret;
919
920 ret = kzalloc(sizeof *ret, GFP_KERNEL);
921 if (!ret)
922 return NULL;
923
924 ret->com.res_id = id;
925 ret->com.state = RES_CQ_ALLOCATED;
926 atomic_set(&ret->ref_count, 0);
927
928 return &ret->com;
929}
930
931static struct res_common *alloc_srq_tr(int id)
932{
933 struct res_srq *ret;
934
935 ret = kzalloc(sizeof *ret, GFP_KERNEL);
936 if (!ret)
937 return NULL;
938
939 ret->com.res_id = id;
940 ret->com.state = RES_SRQ_ALLOCATED;
941 atomic_set(&ret->ref_count, 0);
942
943 return &ret->com;
944}
945
946static struct res_common *alloc_counter_tr(int id)
947{
948 struct res_counter *ret;
949
950 ret = kzalloc(sizeof *ret, GFP_KERNEL);
951 if (!ret)
952 return NULL;
953
954 ret->com.res_id = id;
955 ret->com.state = RES_COUNTER_ALLOCATED;
956
957 return &ret->com;
958}
959
ba062d52
JM
960static struct res_common *alloc_xrcdn_tr(int id)
961{
962 struct res_xrcdn *ret;
963
964 ret = kzalloc(sizeof *ret, GFP_KERNEL);
965 if (!ret)
966 return NULL;
967
968 ret->com.res_id = id;
969 ret->com.state = RES_XRCD_ALLOCATED;
970
971 return &ret->com;
972}
973
2c473ae7 974static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1b9c6b06
HHZ
975{
976 struct res_fs_rule *ret;
977
978 ret = kzalloc(sizeof *ret, GFP_KERNEL);
979 if (!ret)
980 return NULL;
981
982 ret->com.res_id = id;
983 ret->com.state = RES_FS_RULE_ALLOCATED;
2c473ae7 984 ret->qpn = qpn;
1b9c6b06
HHZ
985 return &ret->com;
986}
987
aa1ec3dd 988static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
c82e9aa0
EC
989 int extra)
990{
991 struct res_common *ret;
992
993 switch (type) {
994 case RES_QP:
995 ret = alloc_qp_tr(id);
996 break;
997 case RES_MPT:
998 ret = alloc_mpt_tr(id, extra);
999 break;
1000 case RES_MTT:
1001 ret = alloc_mtt_tr(id, extra);
1002 break;
1003 case RES_EQ:
1004 ret = alloc_eq_tr(id);
1005 break;
1006 case RES_CQ:
1007 ret = alloc_cq_tr(id);
1008 break;
1009 case RES_SRQ:
1010 ret = alloc_srq_tr(id);
1011 break;
1012 case RES_MAC:
c20862c8 1013 pr_err("implementation missing\n");
c82e9aa0
EC
1014 return NULL;
1015 case RES_COUNTER:
1016 ret = alloc_counter_tr(id);
1017 break;
ba062d52
JM
1018 case RES_XRCD:
1019 ret = alloc_xrcdn_tr(id);
1020 break;
1b9c6b06 1021 case RES_FS_RULE:
2c473ae7 1022 ret = alloc_fs_rule_tr(id, extra);
1b9c6b06 1023 break;
c82e9aa0
EC
1024 default:
1025 return NULL;
1026 }
1027 if (ret)
1028 ret->owner = slave;
1029
1030 return ret;
1031}
1032
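/* Create tracker entries for @count consecutive ids starting at @base,
 * insert them into the type's rb-tree and link them onto the owning
 * slave's list; on a collision or allocation failure the entries already
 * inserted are unwound and an error is returned.
 */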
aa1ec3dd 1033static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1034 enum mlx4_resource type, int extra)
1035{
1036 int i;
1037 int err;
1038 struct mlx4_priv *priv = mlx4_priv(dev);
1039 struct res_common **res_arr;
1040 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4af1c048 1041 struct rb_root *root = &tracker->res_tree[type];
1042
1043 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1044 if (!res_arr)
1045 return -ENOMEM;
1046
1047 for (i = 0; i < count; ++i) {
1048 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1049 if (!res_arr[i]) {
1050 for (--i; i >= 0; --i)
1051 kfree(res_arr[i]);
1052
1053 kfree(res_arr);
1054 return -ENOMEM;
1055 }
1056 }
1057
1058 spin_lock_irq(mlx4_tlock(dev));
1059 for (i = 0; i < count; ++i) {
1060 if (find_res(dev, base + i, type)) {
1061 err = -EEXIST;
1062 goto undo;
1063 }
4af1c048 1064 err = res_tracker_insert(root, res_arr[i]);
1065 if (err)
1066 goto undo;
1067 list_add_tail(&res_arr[i]->list,
1068 &tracker->slave_list[slave].res_list[type]);
1069 }
1070 spin_unlock_irq(mlx4_tlock(dev));
1071 kfree(res_arr);
1072
1073 return 0;
1074
1075undo:
1076 for (--i; i >= base; --i)
4af1c048 1077 rb_erase(&res_arr[i]->node, root);
1078
1079 spin_unlock_irq(mlx4_tlock(dev));
1080
1081 for (i = 0; i < count; ++i)
1082 kfree(res_arr[i]);
1083
1084 kfree(res_arr);
1085
1086 return err;
1087}
1088
1089static int remove_qp_ok(struct res_qp *res)
1090{
2c473ae7
HHZ
1091 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1092 !list_empty(&res->mcg_list)) {
1093 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1094 res->com.state, atomic_read(&res->ref_count));
c82e9aa0 1095 return -EBUSY;
2c473ae7 1096 } else if (res->com.state != RES_QP_RESERVED) {
c82e9aa0 1097 return -EPERM;
2c473ae7 1098 }
c82e9aa0
EC
1099
1100 return 0;
1101}
1102
1103static int remove_mtt_ok(struct res_mtt *res, int order)
1104{
1105 if (res->com.state == RES_MTT_BUSY ||
1106 atomic_read(&res->ref_count)) {
c20862c8
AV
1107 pr_devel("%s-%d: state %s, ref_count %d\n",
1108 __func__, __LINE__,
1109 mtt_states_str(res->com.state),
1110 atomic_read(&res->ref_count));
c82e9aa0
EC
1111 return -EBUSY;
1112 } else if (res->com.state != RES_MTT_ALLOCATED)
1113 return -EPERM;
1114 else if (res->order != order)
1115 return -EINVAL;
1116
1117 return 0;
1118}
1119
1120static int remove_mpt_ok(struct res_mpt *res)
1121{
1122 if (res->com.state == RES_MPT_BUSY)
1123 return -EBUSY;
1124 else if (res->com.state != RES_MPT_RESERVED)
1125 return -EPERM;
1126
1127 return 0;
1128}
1129
1130static int remove_eq_ok(struct res_eq *res)
1131{
1132 if (res->com.state == RES_MPT_BUSY)
1133 return -EBUSY;
1134 else if (res->com.state != RES_MPT_RESERVED)
1135 return -EPERM;
1136
1137 return 0;
1138}
1139
1140static int remove_counter_ok(struct res_counter *res)
1141{
1142 if (res->com.state == RES_COUNTER_BUSY)
1143 return -EBUSY;
1144 else if (res->com.state != RES_COUNTER_ALLOCATED)
1145 return -EPERM;
1146
1147 return 0;
1148}
1149
ba062d52
JM
1150static int remove_xrcdn_ok(struct res_xrcdn *res)
1151{
1152 if (res->com.state == RES_XRCD_BUSY)
1153 return -EBUSY;
1154 else if (res->com.state != RES_XRCD_ALLOCATED)
1155 return -EPERM;
1156
1157 return 0;
1158}
1159
1b9c6b06
HHZ
1160static int remove_fs_rule_ok(struct res_fs_rule *res)
1161{
1162 if (res->com.state == RES_FS_RULE_BUSY)
1163 return -EBUSY;
1164 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1165 return -EPERM;
1166
1167 return 0;
1168}
1169
c82e9aa0
EC
1170static int remove_cq_ok(struct res_cq *res)
1171{
1172 if (res->com.state == RES_CQ_BUSY)
1173 return -EBUSY;
1174 else if (res->com.state != RES_CQ_ALLOCATED)
1175 return -EPERM;
1176
1177 return 0;
1178}
1179
1180static int remove_srq_ok(struct res_srq *res)
1181{
1182 if (res->com.state == RES_SRQ_BUSY)
1183 return -EBUSY;
1184 else if (res->com.state != RES_SRQ_ALLOCATED)
1185 return -EPERM;
1186
1187 return 0;
1188}
1189
1190static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1191{
1192 switch (type) {
1193 case RES_QP:
1194 return remove_qp_ok((struct res_qp *)res);
1195 case RES_CQ:
1196 return remove_cq_ok((struct res_cq *)res);
1197 case RES_SRQ:
1198 return remove_srq_ok((struct res_srq *)res);
1199 case RES_MPT:
1200 return remove_mpt_ok((struct res_mpt *)res);
1201 case RES_MTT:
1202 return remove_mtt_ok((struct res_mtt *)res, extra);
1203 case RES_MAC:
1204 return -ENOSYS;
1205 case RES_EQ:
1206 return remove_eq_ok((struct res_eq *)res);
1207 case RES_COUNTER:
1208 return remove_counter_ok((struct res_counter *)res);
ba062d52
JM
1209 case RES_XRCD:
1210 return remove_xrcdn_ok((struct res_xrcdn *)res);
1b9c6b06
HHZ
1211 case RES_FS_RULE:
1212 return remove_fs_rule_ok((struct res_fs_rule *)res);
c82e9aa0
EC
1213 default:
1214 return -EINVAL;
1215 }
1216}
1217
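/* Remove @count consecutive tracker entries starting at @base: a first
 * pass checks that every id exists, belongs to @slave and is in a state
 * that allows removal (remove_ok), and only then a second pass erases and
 * frees the entries, so the range is removed all-or-nothing.
 */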
aa1ec3dd 1218static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1219 enum mlx4_resource type, int extra)
1220{
aa1ec3dd 1221 u64 i;
1222 int err;
1223 struct mlx4_priv *priv = mlx4_priv(dev);
1224 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1225 struct res_common *r;
1226
1227 spin_lock_irq(mlx4_tlock(dev));
1228 for (i = base; i < base + count; ++i) {
4af1c048 1229 r = res_tracker_lookup(&tracker->res_tree[type], i);
1230 if (!r) {
1231 err = -ENOENT;
1232 goto out;
1233 }
1234 if (r->owner != slave) {
1235 err = -EPERM;
1236 goto out;
1237 }
1238 err = remove_ok(r, type, extra);
1239 if (err)
1240 goto out;
1241 }
1242
1243 for (i = base; i < base + count; ++i) {
1244 r = res_tracker_lookup(&tracker->res_tree[type], i);
1245 rb_erase(&r->node, &tracker->res_tree[type]);
1246 list_del(&r->list);
1247 kfree(r);
1248 }
1249 err = 0;
1250
1251out:
1252 spin_unlock_irq(mlx4_tlock(dev));
1253
1254 return err;
1255}
1256
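/* The *_res_start_move_to() helpers drive the per-resource state
 * machines: they check that the requested transition is legal from the
 * current state, record from_state/to_state and mark the entry busy.  The
 * caller then issues the firmware command and completes with
 * res_end_move() on success or res_abort_move() on failure, roughly
 * (illustrative sketch only):
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
 *	if (err)
 *		return err;
 *	err = mlx4_cmd(...);		(the firmware transition)
 *	if (err)
 *		res_abort_move(dev, slave, RES_QP, qpn);
 *	else
 *		res_end_move(dev, slave, RES_QP, qpn);
 */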
1257static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1258 enum res_qp_states state, struct res_qp **qp,
1259 int alloc)
1260{
1261 struct mlx4_priv *priv = mlx4_priv(dev);
1262 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1263 struct res_qp *r;
1264 int err = 0;
1265
1266 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1267 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1268 if (!r)
1269 err = -ENOENT;
1270 else if (r->com.owner != slave)
1271 err = -EPERM;
1272 else {
1273 switch (state) {
1274 case RES_QP_BUSY:
aa1ec3dd 1275 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1276 __func__, r->com.res_id);
1277 err = -EBUSY;
1278 break;
1279
1280 case RES_QP_RESERVED:
1281 if (r->com.state == RES_QP_MAPPED && !alloc)
1282 break;
1283
aa1ec3dd 1284 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1285 err = -EINVAL;
1286 break;
1287
1288 case RES_QP_MAPPED:
1289 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1290 r->com.state == RES_QP_HW)
1291 break;
1292 else {
aa1ec3dd 1293 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1294 r->com.res_id);
1295 err = -EINVAL;
1296 }
1297
1298 break;
1299
1300 case RES_QP_HW:
1301 if (r->com.state != RES_QP_MAPPED)
1302 err = -EINVAL;
1303 break;
1304 default:
1305 err = -EINVAL;
1306 }
1307
1308 if (!err) {
1309 r->com.from_state = r->com.state;
1310 r->com.to_state = state;
1311 r->com.state = RES_QP_BUSY;
1312 if (qp)
64699336 1313 *qp = r;
1314 }
1315 }
1316
1317 spin_unlock_irq(mlx4_tlock(dev));
1318
1319 return err;
1320}
1321
1322static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1323 enum res_mpt_states state, struct res_mpt **mpt)
1324{
1325 struct mlx4_priv *priv = mlx4_priv(dev);
1326 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1327 struct res_mpt *r;
1328 int err = 0;
1329
1330 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1331 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
c82e9aa0
EC
1332 if (!r)
1333 err = -ENOENT;
1334 else if (r->com.owner != slave)
1335 err = -EPERM;
1336 else {
1337 switch (state) {
1338 case RES_MPT_BUSY:
1339 err = -EINVAL;
1340 break;
1341
1342 case RES_MPT_RESERVED:
1343 if (r->com.state != RES_MPT_MAPPED)
1344 err = -EINVAL;
1345 break;
1346
1347 case RES_MPT_MAPPED:
1348 if (r->com.state != RES_MPT_RESERVED &&
1349 r->com.state != RES_MPT_HW)
1350 err = -EINVAL;
1351 break;
1352
1353 case RES_MPT_HW:
1354 if (r->com.state != RES_MPT_MAPPED)
1355 err = -EINVAL;
1356 break;
1357 default:
1358 err = -EINVAL;
1359 }
1360
1361 if (!err) {
1362 r->com.from_state = r->com.state;
1363 r->com.to_state = state;
1364 r->com.state = RES_MPT_BUSY;
1365 if (mpt)
64699336 1366 *mpt = r;
c82e9aa0
EC
1367 }
1368 }
1369
1370 spin_unlock_irq(mlx4_tlock(dev));
1371
1372 return err;
1373}
1374
1375static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1376 enum res_eq_states state, struct res_eq **eq)
1377{
1378 struct mlx4_priv *priv = mlx4_priv(dev);
1379 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1380 struct res_eq *r;
1381 int err = 0;
1382
1383 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1384 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
c82e9aa0
EC
1385 if (!r)
1386 err = -ENOENT;
1387 else if (r->com.owner != slave)
1388 err = -EPERM;
1389 else {
1390 switch (state) {
1391 case RES_EQ_BUSY:
1392 err = -EINVAL;
1393 break;
1394
1395 case RES_EQ_RESERVED:
1396 if (r->com.state != RES_EQ_HW)
1397 err = -EINVAL;
1398 break;
1399
1400 case RES_EQ_HW:
1401 if (r->com.state != RES_EQ_RESERVED)
1402 err = -EINVAL;
1403 break;
1404
1405 default:
1406 err = -EINVAL;
1407 }
1408
1409 if (!err) {
1410 r->com.from_state = r->com.state;
1411 r->com.to_state = state;
1412 r->com.state = RES_EQ_BUSY;
1413 if (eq)
1414 *eq = r;
1415 }
1416 }
1417
1418 spin_unlock_irq(mlx4_tlock(dev));
1419
1420 return err;
1421}
1422
1423static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1424 enum res_cq_states state, struct res_cq **cq)
1425{
1426 struct mlx4_priv *priv = mlx4_priv(dev);
1427 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1428 struct res_cq *r;
1429 int err;
1430
1431 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1432 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
c9218a9e 1433 if (!r) {
c82e9aa0 1434 err = -ENOENT;
c9218a9e 1435 } else if (r->com.owner != slave) {
c82e9aa0 1436 err = -EPERM;
c9218a9e
PB
1437 } else if (state == RES_CQ_ALLOCATED) {
1438 if (r->com.state != RES_CQ_HW)
c82e9aa0 1439 err = -EINVAL;
c9218a9e
PB
1440 else if (atomic_read(&r->ref_count))
1441 err = -EBUSY;
1442 else
1443 err = 0;
1444 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1445 err = -EINVAL;
1446 } else {
1447 err = 0;
1448 }
c82e9aa0 1449
c9218a9e
PB
1450 if (!err) {
1451 r->com.from_state = r->com.state;
1452 r->com.to_state = state;
1453 r->com.state = RES_CQ_BUSY;
1454 if (cq)
1455 *cq = r;
c82e9aa0
EC
1456 }
1457
1458 spin_unlock_irq(mlx4_tlock(dev));
1459
1460 return err;
1461}
1462
1463static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
f088cbb8 1464 enum res_srq_states state, struct res_srq **srq)
c82e9aa0
EC
1465{
1466 struct mlx4_priv *priv = mlx4_priv(dev);
1467 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1468 struct res_srq *r;
1469 int err = 0;
1470
1471 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1472 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
f088cbb8 1473 if (!r) {
c82e9aa0 1474 err = -ENOENT;
f088cbb8 1475 } else if (r->com.owner != slave) {
c82e9aa0 1476 err = -EPERM;
f088cbb8
PB
1477 } else if (state == RES_SRQ_ALLOCATED) {
1478 if (r->com.state != RES_SRQ_HW)
c82e9aa0 1479 err = -EINVAL;
f088cbb8
PB
1480 else if (atomic_read(&r->ref_count))
1481 err = -EBUSY;
1482 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1483 err = -EINVAL;
1484 }
c82e9aa0 1485
f088cbb8
PB
1486 if (!err) {
1487 r->com.from_state = r->com.state;
1488 r->com.to_state = state;
1489 r->com.state = RES_SRQ_BUSY;
1490 if (srq)
1491 *srq = r;
c82e9aa0
EC
1492 }
1493
1494 spin_unlock_irq(mlx4_tlock(dev));
1495
1496 return err;
1497}
1498
1499static void res_abort_move(struct mlx4_dev *dev, int slave,
1500 enum mlx4_resource type, int id)
1501{
1502 struct mlx4_priv *priv = mlx4_priv(dev);
1503 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1504 struct res_common *r;
1505
1506 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1507 r = res_tracker_lookup(&tracker->res_tree[type], id);
c82e9aa0
EC
1508 if (r && (r->owner == slave))
1509 r->state = r->from_state;
1510 spin_unlock_irq(mlx4_tlock(dev));
1511}
1512
1513static void res_end_move(struct mlx4_dev *dev, int slave,
1514 enum mlx4_resource type, int id)
1515{
1516 struct mlx4_priv *priv = mlx4_priv(dev);
1517 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1518 struct res_common *r;
1519
1520 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1521 r = res_tracker_lookup(&tracker->res_tree[type], id);
c82e9aa0
EC
1522 if (r && (r->owner == slave))
1523 r->state = r->to_state;
1524 spin_unlock_irq(mlx4_tlock(dev));
1525}
1526
1527static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1528{
e2c76824
JM
1529 return mlx4_is_qp_reserved(dev, qpn) &&
1530 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
c82e9aa0
EC
1531}
1532
54679e14
JM
1533static int fw_reserved(struct mlx4_dev *dev, int qpn)
1534{
1535 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1536}
1537
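/* ALLOC_RES handler for QPs.  RES_OP_RESERVE charges the slave's quota,
 * reserves a QP number range and records it in the tracker;
 * RES_OP_MAP_ICM moves a specific QP to RES_QP_MAPPED and maps its ICM
 * unless the QP number is firmware-reserved.
 */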
1538static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1539 u64 in_param, u64 *out_param)
1540{
1541 int err;
1542 int count;
1543 int align;
1544 int base;
1545 int qpn;
ddae0349 1546 u8 flags;
c82e9aa0
EC
1547
1548 switch (op) {
1549 case RES_OP_RESERVE:
2d5c57d7 1550 count = get_param_l(&in_param) & 0xffffff;
ddae0349
EE
1551 /* Turn off all unsupported QP allocation flags that the
1552 * slave tries to set.
1553 */
1554 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
c82e9aa0 1555 align = get_param_h(&in_param);
146f3ef4 1556 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
1557 if (err)
1558 return err;
1559
ddae0349 1560 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
146f3ef4
JM
1561 if (err) {
1562 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1563 return err;
1564 }
1565
c82e9aa0
EC
1566 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1567 if (err) {
146f3ef4 1568 mlx4_release_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
1569 __mlx4_qp_release_range(dev, base, count);
1570 return err;
1571 }
1572 set_param_l(out_param, base);
1573 break;
1574 case RES_OP_MAP_ICM:
1575 qpn = get_param_l(&in_param) & 0x7fffff;
1576 if (valid_reserved(dev, slave, qpn)) {
1577 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1578 if (err)
1579 return err;
1580 }
1581
1582 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1583 NULL, 1);
1584 if (err)
1585 return err;
1586
54679e14 1587 if (!fw_reserved(dev, qpn)) {
40f2287b 1588 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
c82e9aa0
EC
1589 if (err) {
1590 res_abort_move(dev, slave, RES_QP, qpn);
1591 return err;
1592 }
1593 }
1594
1595 res_end_move(dev, slave, RES_QP, qpn);
1596 break;
1597
1598 default:
1599 err = -EINVAL;
1600 break;
1601 }
1602 return err;
1603}
1604
1605static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1606 u64 in_param, u64 *out_param)
1607{
1608 int err = -EINVAL;
1609 int base;
1610 int order;
1611
1612 if (op != RES_OP_RESERVE_AND_MAP)
1613 return err;
1614
1615 order = get_param_l(&in_param);
146f3ef4
JM
1616
1617 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1618 if (err)
1619 return err;
1620
c82e9aa0 1621 base = __mlx4_alloc_mtt_range(dev, order);
146f3ef4
JM
1622 if (base == -1) {
1623 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 1624 return -ENOMEM;
146f3ef4 1625 }
c82e9aa0
EC
1626
1627 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
146f3ef4
JM
1628 if (err) {
1629 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 1630 __mlx4_free_mtt_range(dev, base, order);
146f3ef4 1631 } else {
c82e9aa0 1632 set_param_l(out_param, base);
146f3ef4 1633 }
c82e9aa0
EC
1634
1635 return err;
1636}
1637
1638static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1639 u64 in_param, u64 *out_param)
1640{
1641 int err = -EINVAL;
1642 int index;
1643 int id;
1644 struct res_mpt *mpt;
1645
1646 switch (op) {
1647 case RES_OP_RESERVE:
146f3ef4
JM
1648 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1649 if (err)
1650 break;
1651
b20e519a 1652 index = __mlx4_mpt_reserve(dev);
146f3ef4
JM
1653 if (index == -1) {
1654 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
c82e9aa0 1655 break;
146f3ef4 1656 }
c82e9aa0
EC
1657 id = index & mpt_mask(dev);
1658
1659 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1660 if (err) {
146f3ef4 1661 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
b20e519a 1662 __mlx4_mpt_release(dev, index);
c82e9aa0
EC
1663 break;
1664 }
1665 set_param_l(out_param, index);
1666 break;
1667 case RES_OP_MAP_ICM:
1668 index = get_param_l(&in_param);
1669 id = index & mpt_mask(dev);
1670 err = mr_res_start_move_to(dev, slave, id,
1671 RES_MPT_MAPPED, &mpt);
1672 if (err)
1673 return err;
1674
40f2287b 1675 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
c82e9aa0
EC
1676 if (err) {
1677 res_abort_move(dev, slave, RES_MPT, id);
1678 return err;
1679 }
1680
1681 res_end_move(dev, slave, RES_MPT, id);
1682 break;
1683 }
1684 return err;
1685}
1686
1687static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1688 u64 in_param, u64 *out_param)
1689{
1690 int cqn;
1691 int err;
1692
1693 switch (op) {
1694 case RES_OP_RESERVE_AND_MAP:
146f3ef4 1695 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
1696 if (err)
1697 break;
1698
146f3ef4
JM
1699 err = __mlx4_cq_alloc_icm(dev, &cqn);
1700 if (err) {
1701 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1702 break;
1703 }
1704
c82e9aa0
EC
1705 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1706 if (err) {
146f3ef4 1707 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
1708 __mlx4_cq_free_icm(dev, cqn);
1709 break;
1710 }
1711
1712 set_param_l(out_param, cqn);
1713 break;
1714
1715 default:
1716 err = -EINVAL;
1717 }
1718
1719 return err;
1720}
1721
1722static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1723 u64 in_param, u64 *out_param)
1724{
1725 int srqn;
1726 int err;
1727
1728 switch (op) {
1729 case RES_OP_RESERVE_AND_MAP:
146f3ef4 1730 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
1731 if (err)
1732 break;
1733
146f3ef4
JM
1734 err = __mlx4_srq_alloc_icm(dev, &srqn);
1735 if (err) {
1736 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1737 break;
1738 }
1739
c82e9aa0
EC
1740 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1741 if (err) {
146f3ef4 1742 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
1743 __mlx4_srq_free_icm(dev, srqn);
1744 break;
1745 }
1746
1747 set_param_l(out_param, srqn);
1748 break;
1749
1750 default:
1751 err = -EINVAL;
1752 }
1753
1754 return err;
1755}
1756
1757static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1758 u8 smac_index, u64 *mac)
1759{
1760 struct mlx4_priv *priv = mlx4_priv(dev);
1761 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1762 struct list_head *mac_list =
1763 &tracker->slave_list[slave].res_list[RES_MAC];
1764 struct mac_res *res, *tmp;
1765
1766 list_for_each_entry_safe(res, tmp, mac_list, list) {
1767 if (res->smac_index == smac_index && res->port == (u8) port) {
1768 *mac = res->mac;
1769 return 0;
1770 }
1771 }
1772 return -ENOENT;
1773}
1774
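/* MAC registrations are reference counted per (slave, port, mac): a
 * repeated registration only bumps ref_count, while the first one charges
 * the slave's RES_MAC quota and queues a mac_res entry (including the
 * smac index returned by __mlx4_register_mac) on the slave's list.
 */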
1775static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1776{
1777 struct mlx4_priv *priv = mlx4_priv(dev);
1778 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2f5bb473
JM
1779 struct list_head *mac_list =
1780 &tracker->slave_list[slave].res_list[RES_MAC];
1781 struct mac_res *res, *tmp;
1782
1783 list_for_each_entry_safe(res, tmp, mac_list, list) {
1784 if (res->mac == mac && res->port == (u8) port) {
1785 /* mac found. update ref count */
1786 ++res->ref_count;
1787 return 0;
1788 }
1789 }
c82e9aa0 1790
146f3ef4
JM
1791 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1792 return -EINVAL;
c82e9aa0 1793 res = kzalloc(sizeof *res, GFP_KERNEL);
146f3ef4
JM
1794 if (!res) {
1795 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
c82e9aa0 1796 return -ENOMEM;
146f3ef4 1797 }
c82e9aa0
EC
1798 res->mac = mac;
1799 res->port = (u8) port;
2f5bb473
JM
1800 res->smac_index = smac_index;
1801 res->ref_count = 1;
c82e9aa0
EC
1802 list_add_tail(&res->list,
1803 &tracker->slave_list[slave].res_list[RES_MAC]);
1804 return 0;
1805}
1806
1807static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1808 int port)
1809{
1810 struct mlx4_priv *priv = mlx4_priv(dev);
1811 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1812 struct list_head *mac_list =
1813 &tracker->slave_list[slave].res_list[RES_MAC];
1814 struct mac_res *res, *tmp;
1815
1816 list_for_each_entry_safe(res, tmp, mac_list, list) {
1817 if (res->mac == mac && res->port == (u8) port) {
2f5bb473
JM
1818 if (!--res->ref_count) {
1819 list_del(&res->list);
1820 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1821 kfree(res);
1822 }
c82e9aa0
EC
1823 break;
1824 }
1825 }
1826}
1827
1828static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1829{
1830 struct mlx4_priv *priv = mlx4_priv(dev);
1831 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1832 struct list_head *mac_list =
1833 &tracker->slave_list[slave].res_list[RES_MAC];
1834 struct mac_res *res, *tmp;
2f5bb473 1835 int i;
c82e9aa0
EC
1836
1837 list_for_each_entry_safe(res, tmp, mac_list, list) {
1838 list_del(&res->list);
2f5bb473
JM
1839 /* dereference the mac the num times the slave referenced it */
1840 for (i = 0; i < res->ref_count; i++)
1841 __mlx4_unregister_mac(dev, res->port, res->mac);
146f3ef4 1842 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
c82e9aa0
EC
1843 kfree(res);
1844 }
1845}
1846
1847static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
acddd5dd 1848 u64 in_param, u64 *out_param, int in_port)
c82e9aa0
EC
1849{
1850 int err = -EINVAL;
1851 int port;
1852 u64 mac;
2f5bb473 1853 u8 smac_index;
c82e9aa0
EC
1854
1855 if (op != RES_OP_RESERVE_AND_MAP)
1856 return err;
1857
acddd5dd 1858 port = !in_port ? get_param_l(out_param) : in_port;
449fc488
MB
1859 port = mlx4_slave_convert_port(
1860 dev, slave, port);
1861
1862 if (port < 0)
1863 return -EINVAL;
c82e9aa0
EC
1864 mac = in_param;
1865
1866 err = __mlx4_register_mac(dev, port, mac);
1867 if (err >= 0) {
2f5bb473 1868 smac_index = err;
c82e9aa0
EC
1869 set_param_l(out_param, err);
1870 err = 0;
1871 }
1872
1873 if (!err) {
2f5bb473 1874 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
c82e9aa0
EC
1875 if (err)
1876 __mlx4_unregister_mac(dev, port, mac);
1877 }
1878 return err;
1879}
1880
4874080d
JM
1881static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1882 int port, int vlan_index)
ffe455ad 1883{
4874080d
JM
1884 struct mlx4_priv *priv = mlx4_priv(dev);
1885 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1886 struct list_head *vlan_list =
1887 &tracker->slave_list[slave].res_list[RES_VLAN];
1888 struct vlan_res *res, *tmp;
1889
1890 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1891 if (res->vlan == vlan && res->port == (u8) port) {
1892 /* vlan found. update ref count */
1893 ++res->ref_count;
1894 return 0;
1895 }
1896 }
1897
146f3ef4
JM
1898 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1899 return -EINVAL;
4874080d 1900 res = kzalloc(sizeof(*res), GFP_KERNEL);
146f3ef4
JM
1901 if (!res) {
1902 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
4874080d 1903 return -ENOMEM;
146f3ef4 1904 }
4874080d
JM
1905 res->vlan = vlan;
1906 res->port = (u8) port;
1907 res->vlan_index = vlan_index;
1908 res->ref_count = 1;
1909 list_add_tail(&res->list,
1910 &tracker->slave_list[slave].res_list[RES_VLAN]);
ffe455ad
EE
1911 return 0;
1912}
1913
4874080d
JM
1914
1915static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1916 int port)
1917{
1918 struct mlx4_priv *priv = mlx4_priv(dev);
1919 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1920 struct list_head *vlan_list =
1921 &tracker->slave_list[slave].res_list[RES_VLAN];
1922 struct vlan_res *res, *tmp;
1923
1924 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1925 if (res->vlan == vlan && res->port == (u8) port) {
1926 if (!--res->ref_count) {
1927 list_del(&res->list);
146f3ef4
JM
1928 mlx4_release_resource(dev, slave, RES_VLAN,
1929 1, port);
4874080d
JM
1930 kfree(res);
1931 }
1932 break;
1933 }
1934 }
1935}
1936
1937static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1938{
1939 struct mlx4_priv *priv = mlx4_priv(dev);
1940 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1941 struct list_head *vlan_list =
1942 &tracker->slave_list[slave].res_list[RES_VLAN];
1943 struct vlan_res *res, *tmp;
1944 int i;
1945
1946 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1947 list_del(&res->list);
1948 /* dereference the vlan the num times the slave referenced it */
1949 for (i = 0; i < res->ref_count; i++)
1950 __mlx4_unregister_vlan(dev, res->port, res->vlan);
146f3ef4 1951 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
4874080d
JM
1952 kfree(res);
1953 }
1954}
1955
1956static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2c957ff2 1957 u64 in_param, u64 *out_param, int in_port)
4874080d 1958{
2c957ff2
JM
1959 struct mlx4_priv *priv = mlx4_priv(dev);
1960 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
4874080d
JM
1961 int err;
1962 u16 vlan;
1963 int vlan_index;
2c957ff2
JM
1964 int port;
1965
1966 port = !in_port ? get_param_l(out_param) : in_port;
4874080d
JM
1967
1968 if (!port || op != RES_OP_RESERVE_AND_MAP)
1969 return -EINVAL;
1970
449fc488
MB
1971 port = mlx4_slave_convert_port(
1972 dev, slave, port);
1973
1974 if (port < 0)
1975 return -EINVAL;
2c957ff2
JM
1976 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1977 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1978 slave_state[slave].old_vlan_api = true;
1979 return 0;
1980 }
1981
4874080d
JM
1982 vlan = (u16) in_param;
1983
1984 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1985 if (!err) {
1986 set_param_l(out_param, (u32) vlan_index);
1987 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1988 if (err)
1989 __mlx4_unregister_vlan(dev, port, vlan);
1990 }
1991 return err;
1992}
1993
ba062d52
JM
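/*
 * Counter allocation for a VF follows the usual grant -> allocate ->
 * track sequence: reserve quota with mlx4_grant_resource(), allocate a
 * counter, record it in the resource tracker, and unwind both steps if
 * anything fails.
 */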
1994static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1995 u64 in_param, u64 *out_param)
1996{
1997 u32 index;
1998 int err;
1999
2000 if (op != RES_OP_RESERVE)
2001 return -EINVAL;
2002
146f3ef4 2003 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
ba062d52
JM
2004 if (err)
2005 return err;
2006
146f3ef4
JM
2007 err = __mlx4_counter_alloc(dev, &index);
2008 if (err) {
2009 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2010 return err;
2011 }
2012
ba062d52 2013 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
146f3ef4 2014 if (err) {
ba062d52 2015 __mlx4_counter_free(dev, index);
146f3ef4
JM
2016 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2017 } else {
ba062d52 2018 set_param_l(out_param, index);
146f3ef4 2019 }
ba062d52
JM
2020
2021 return err;
2022}
2023
2024static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2025 u64 in_param, u64 *out_param)
2026{
2027 u32 xrcdn;
2028 int err;
2029
2030 if (op != RES_OP_RESERVE)
2031 return -EINVAL;
2032
2033 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2034 if (err)
2035 return err;
2036
2037 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2038 if (err)
2039 __mlx4_xrcd_free(dev, xrcdn);
2040 else
2041 set_param_l(out_param, xrcdn);
2042
2043 return err;
2044}
2045
c82e9aa0
EC
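/*
 * Dispatcher for ALLOC_RES commands issued by slaves: the low byte of
 * in_modifier selects the resource type, op_modifier carries the
 * allocation op, and for MAC and VLAN the port is encoded in bits 8-15
 * of in_modifier.
 */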
2046int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2047 struct mlx4_vhcr *vhcr,
2048 struct mlx4_cmd_mailbox *inbox,
2049 struct mlx4_cmd_mailbox *outbox,
2050 struct mlx4_cmd_info *cmd)
2051{
2052 int err;
2053 int alop = vhcr->op_modifier;
2054
acddd5dd 2055 switch (vhcr->in_modifier & 0xFF) {
c82e9aa0
EC
2056 case RES_QP:
2057 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2058 vhcr->in_param, &vhcr->out_param);
2059 break;
2060
2061 case RES_MTT:
2062 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2063 vhcr->in_param, &vhcr->out_param);
2064 break;
2065
2066 case RES_MPT:
2067 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2068 vhcr->in_param, &vhcr->out_param);
2069 break;
2070
2071 case RES_CQ:
2072 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2073 vhcr->in_param, &vhcr->out_param);
2074 break;
2075
2076 case RES_SRQ:
2077 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2078 vhcr->in_param, &vhcr->out_param);
2079 break;
2080
2081 case RES_MAC:
2082 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
acddd5dd
JM
2083 vhcr->in_param, &vhcr->out_param,
2084 (vhcr->in_modifier >> 8) & 0xFF);
c82e9aa0
EC
2085 break;
2086
ffe455ad
EE
2087 case RES_VLAN:
2088 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
acddd5dd
JM
2089 vhcr->in_param, &vhcr->out_param,
2090 (vhcr->in_modifier >> 8) & 0xFF);
ffe455ad
EE
2091 break;
2092
ba062d52
JM
2093 case RES_COUNTER:
2094 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2095 vhcr->in_param, &vhcr->out_param);
2096 break;
2097
2098 case RES_XRCD:
2099 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2100 vhcr->in_param, &vhcr->out_param);
2101 break;
2102
c82e9aa0
EC
2103 default:
2104 err = -EINVAL;
2105 break;
2106 }
2107
2108 return err;
2109}
2110
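/*
 * Free path for QP resources: RES_OP_RESERVE returns a whole range of
 * QP numbers (base in the low param word, count in the high word) to
 * the allocator, while RES_OP_MAP_ICM moves a single QP back to the
 * RESERVED state and frees its ICM unless the QPN is FW-reserved.
 */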
2111static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2112 u64 in_param)
2113{
2114 int err;
2115 int count;
2116 int base;
2117 int qpn;
2118
2119 switch (op) {
2120 case RES_OP_RESERVE:
2121 base = get_param_l(&in_param) & 0x7fffff;
2122 count = get_param_h(&in_param);
2123 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2124 if (err)
2125 break;
146f3ef4 2126 mlx4_release_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
2127 __mlx4_qp_release_range(dev, base, count);
2128 break;
2129 case RES_OP_MAP_ICM:
2130 qpn = get_param_l(&in_param) & 0x7fffff;
2131 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2132 NULL, 0);
2133 if (err)
2134 return err;
2135
54679e14 2136 if (!fw_reserved(dev, qpn))
c82e9aa0
EC
2137 __mlx4_qp_free_icm(dev, qpn);
2138
2139 res_end_move(dev, slave, RES_QP, qpn);
2140
2141 if (valid_reserved(dev, slave, qpn))
2142 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2143 break;
2144 default:
2145 err = -EINVAL;
2146 break;
2147 }
2148 return err;
2149}
2150
2151static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2152 u64 in_param, u64 *out_param)
2153{
2154 int err = -EINVAL;
2155 int base;
2156 int order;
2157
2158 if (op != RES_OP_RESERVE_AND_MAP)
2159 return err;
2160
2161 base = get_param_l(&in_param);
2162 order = get_param_h(&in_param);
2163 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
146f3ef4
JM
2164 if (!err) {
2165 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 2166 __mlx4_free_mtt_range(dev, base, order);
146f3ef4 2167 }
c82e9aa0
EC
2168 return err;
2169}
2170
2171static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2172 u64 in_param)
2173{
2174 int err = -EINVAL;
2175 int index;
2176 int id;
2177 struct res_mpt *mpt;
2178
2179 switch (op) {
2180 case RES_OP_RESERVE:
2181 index = get_param_l(&in_param);
2182 id = index & mpt_mask(dev);
2183 err = get_res(dev, slave, id, RES_MPT, &mpt);
2184 if (err)
2185 break;
2186 index = mpt->key;
2187 put_res(dev, slave, id, RES_MPT);
2188
2189 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2190 if (err)
2191 break;
146f3ef4 2192 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
b20e519a 2193 __mlx4_mpt_release(dev, index);
c82e9aa0
EC
2194 break;
2195 case RES_OP_MAP_ICM:
2196 index = get_param_l(&in_param);
2197 id = index & mpt_mask(dev);
2198 err = mr_res_start_move_to(dev, slave, id,
2199 RES_MPT_RESERVED, &mpt);
2200 if (err)
2201 return err;
2202
b20e519a 2203 __mlx4_mpt_free_icm(dev, mpt->key);
c82e9aa0
EC
2204 res_end_move(dev, slave, RES_MPT, id);
2205 return err;
2206 break;
2207 default:
2208 err = -EINVAL;
2209 break;
2210 }
2211 return err;
2212}
2213
2214static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2215 u64 in_param, u64 *out_param)
2216{
2217 int cqn;
2218 int err;
2219
2220 switch (op) {
2221 case RES_OP_RESERVE_AND_MAP:
2222 cqn = get_param_l(&in_param);
2223 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2224 if (err)
2225 break;
2226
146f3ef4 2227 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
2228 __mlx4_cq_free_icm(dev, cqn);
2229 break;
2230
2231 default:
2232 err = -EINVAL;
2233 break;
2234 }
2235
2236 return err;
2237}
2238
2239static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2240 u64 in_param, u64 *out_param)
2241{
2242 int srqn;
2243 int err;
2244
2245 switch (op) {
2246 case RES_OP_RESERVE_AND_MAP:
2247 srqn = get_param_l(&in_param);
2248 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2249 if (err)
2250 break;
2251
146f3ef4 2252 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
2253 __mlx4_srq_free_icm(dev, srqn);
2254 break;
2255
2256 default:
2257 err = -EINVAL;
2258 break;
2259 }
2260
2261 return err;
2262}
2263
2264static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
acddd5dd 2265 u64 in_param, u64 *out_param, int in_port)
c82e9aa0
EC
2266{
2267 int port;
2268 int err = 0;
2269
2270 switch (op) {
2271 case RES_OP_RESERVE_AND_MAP:
acddd5dd 2272 port = !in_port ? get_param_l(out_param) : in_port;
449fc488
MB
2273 port = mlx4_slave_convert_port(
2274 dev, slave, port);
2275
2276 if (port < 0)
2277 return -EINVAL;
c82e9aa0
EC
2278 mac_del_from_slave(dev, slave, in_param, port);
2279 __mlx4_unregister_mac(dev, port, in_param);
2280 break;
2281 default:
2282 err = -EINVAL;
2283 break;
2284 }
2285
2286 return err;
2287
2288}
2289
ffe455ad 2290static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
acddd5dd 2291 u64 in_param, u64 *out_param, int port)
ffe455ad 2292{
2c957ff2
JM
2293 struct mlx4_priv *priv = mlx4_priv(dev);
2294 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
4874080d
JM
2295 int err = 0;
2296
449fc488
MB
2297 port = mlx4_slave_convert_port(
2298 dev, slave, port);
2299
2300 if (port < 0)
2301 return -EINVAL;
4874080d
JM
2302 switch (op) {
2303 case RES_OP_RESERVE_AND_MAP:
2c957ff2
JM
2304 if (slave_state[slave].old_vlan_api)
2305 return 0;
4874080d
JM
2306 if (!port)
2307 return -EINVAL;
2308 vlan_del_from_slave(dev, slave, in_param, port);
2309 __mlx4_unregister_vlan(dev, port, in_param);
2310 break;
2311 default:
2312 err = -EINVAL;
2313 break;
2314 }
2315
2316 return err;
ffe455ad
EE
2317}
2318
ba062d52
JM
2319static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2320 u64 in_param, u64 *out_param)
2321{
2322 int index;
2323 int err;
2324
2325 if (op != RES_OP_RESERVE)
2326 return -EINVAL;
2327
2328 index = get_param_l(&in_param);
2329 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2330 if (err)
2331 return err;
2332
2333 __mlx4_counter_free(dev, index);
146f3ef4 2334 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
ba062d52
JM
2335
2336 return err;
2337}
2338
2339static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2340 u64 in_param, u64 *out_param)
2341{
2342 int xrcdn;
2343 int err;
2344
2345 if (op != RES_OP_RESERVE)
2346 return -EINVAL;
2347
2348 xrcdn = get_param_l(&in_param);
2349 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2350 if (err)
2351 return err;
2352
2353 __mlx4_xrcd_free(dev, xrcdn);
2354
2355 return err;
2356}
2357
c82e9aa0
EC
2358int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2359 struct mlx4_vhcr *vhcr,
2360 struct mlx4_cmd_mailbox *inbox,
2361 struct mlx4_cmd_mailbox *outbox,
2362 struct mlx4_cmd_info *cmd)
2363{
2364 int err = -EINVAL;
2365 int alop = vhcr->op_modifier;
2366
acddd5dd 2367 switch (vhcr->in_modifier & 0xFF) {
c82e9aa0
EC
2368 case RES_QP:
2369 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2370 vhcr->in_param);
2371 break;
2372
2373 case RES_MTT:
2374 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2375 vhcr->in_param, &vhcr->out_param);
2376 break;
2377
2378 case RES_MPT:
2379 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2380 vhcr->in_param);
2381 break;
2382
2383 case RES_CQ:
2384 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2385 vhcr->in_param, &vhcr->out_param);
2386 break;
2387
2388 case RES_SRQ:
2389 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2390 vhcr->in_param, &vhcr->out_param);
2391 break;
2392
2393 case RES_MAC:
2394 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
acddd5dd
JM
2395 vhcr->in_param, &vhcr->out_param,
2396 (vhcr->in_modifier >> 8) & 0xFF);
c82e9aa0
EC
2397 break;
2398
ffe455ad
EE
2399 case RES_VLAN:
2400 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
acddd5dd
JM
2401 vhcr->in_param, &vhcr->out_param,
2402 (vhcr->in_modifier >> 8) & 0xFF);
ffe455ad
EE
2403 break;
2404
ba062d52
JM
2405 case RES_COUNTER:
2406 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2407 vhcr->in_param, &vhcr->out_param);
2408 break;
2409
2410 case RES_XRCD:
2411 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2412 vhcr->in_param, &vhcr->out_param);
 2413 break;
c82e9aa0
EC
2414 default:
2415 break;
2416 }
2417 return err;
2418}
2419
2420/* ugly but other choices are uglier */
2421static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2422{
2423 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2424}
2425
2b8fb286 2426static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
c82e9aa0 2427{
2b8fb286 2428 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
c82e9aa0
EC
2429}
2430
2431static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2432{
2433 return be32_to_cpu(mpt->mtt_sz);
2434}
2435
cc1ade94
SM
2436static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2437{
2438 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2439}
2440
2441static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2442{
2443 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2444}
2445
2446static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2447{
2448 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2449}
2450
2451static int mr_is_region(struct mlx4_mpt_entry *mpt)
2452{
2453 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2454}
2455
2b8fb286 2456static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
c82e9aa0
EC
2457{
2458 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2459}
2460
2b8fb286 2461static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
c82e9aa0
EC
2462{
2463 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2464}
2465
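/*
 * Number of MTT pages implied by a QP context: SQ bytes plus RQ bytes
 * (the RQ contributes nothing for SRQ, RSS or XRC QPs), offset by the
 * page_offset field and rounded up to a power-of-two page count.
 */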
2466static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2467{
2468 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2469 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
 2470 int log_sq_stride = qpc->sq_size_stride & 7;
2471 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2472 int log_rq_stride = qpc->rq_size_stride & 7;
2473 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2474 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
5c5f3f0a
YH
2475 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2476 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
c82e9aa0
EC
2477 int sq_size;
2478 int rq_size;
2479 int total_pages;
2480 int total_mem;
2481 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2482
 2483 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2484 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2485 total_mem = sq_size + rq_size;
2486 total_pages =
2487 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2488 page_shift);
2489
2490 return total_pages;
2491}
2492
c82e9aa0
EC
2493static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2494 int size, struct res_mtt *mtt)
2495{
2b8fb286
MA
2496 int res_start = mtt->com.res_id;
2497 int res_size = (1 << mtt->order);
c82e9aa0
EC
2498
2499 if (start < res_start || start + size > res_start + res_size)
2500 return -EPERM;
2501 return 0;
2502}
2503
2504int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2505 struct mlx4_vhcr *vhcr,
2506 struct mlx4_cmd_mailbox *inbox,
2507 struct mlx4_cmd_mailbox *outbox,
2508 struct mlx4_cmd_info *cmd)
2509{
2510 int err;
2511 int index = vhcr->in_modifier;
2512 struct res_mtt *mtt;
2513 struct res_mpt *mpt;
2b8fb286 2514 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2515 int phys;
2516 int id;
cc1ade94
SM
2517 u32 pd;
2518 int pd_slave;
c82e9aa0
EC
2519
2520 id = index & mpt_mask(dev);
2521 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2522 if (err)
2523 return err;
2524
cc1ade94
SM
2525 /* Disable memory windows for VFs. */
2526 if (!mr_is_region(inbox->buf)) {
2527 err = -EPERM;
2528 goto ex_abort;
2529 }
2530
2531 /* Make sure that the PD bits related to the slave id are zeros. */
2532 pd = mr_get_pd(inbox->buf);
2533 pd_slave = (pd >> 17) & 0x7f;
2534 if (pd_slave != 0 && pd_slave != slave) {
2535 err = -EPERM;
2536 goto ex_abort;
2537 }
2538
2539 if (mr_is_fmr(inbox->buf)) {
2540 /* FMR and Bind Enable are forbidden in slave devices. */
2541 if (mr_is_bind_enabled(inbox->buf)) {
2542 err = -EPERM;
2543 goto ex_abort;
2544 }
2545 /* FMR and Memory Windows are also forbidden. */
2546 if (!mr_is_region(inbox->buf)) {
2547 err = -EPERM;
2548 goto ex_abort;
2549 }
2550 }
2551
c82e9aa0
EC
2552 phys = mr_phys_mpt(inbox->buf);
2553 if (!phys) {
2b8fb286 2554 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2555 if (err)
2556 goto ex_abort;
2557
2558 err = check_mtt_range(dev, slave, mtt_base,
2559 mr_get_mtt_size(inbox->buf), mtt);
2560 if (err)
2561 goto ex_put;
2562
2563 mpt->mtt = mtt;
2564 }
2565
c82e9aa0
EC
2566 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2567 if (err)
2568 goto ex_put;
2569
2570 if (!phys) {
2571 atomic_inc(&mtt->ref_count);
2572 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2573 }
2574
2575 res_end_move(dev, slave, RES_MPT, id);
2576 return 0;
2577
2578ex_put:
2579 if (!phys)
2580 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2581ex_abort:
2582 res_abort_move(dev, slave, RES_MPT, id);
2583
2584 return err;
2585}
2586
2587int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2588 struct mlx4_vhcr *vhcr,
2589 struct mlx4_cmd_mailbox *inbox,
2590 struct mlx4_cmd_mailbox *outbox,
2591 struct mlx4_cmd_info *cmd)
2592{
2593 int err;
2594 int index = vhcr->in_modifier;
2595 struct res_mpt *mpt;
2596 int id;
2597
2598 id = index & mpt_mask(dev);
2599 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2600 if (err)
2601 return err;
2602
2603 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2604 if (err)
2605 goto ex_abort;
2606
2607 if (mpt->mtt)
2608 atomic_dec(&mpt->mtt->ref_count);
2609
2610 res_end_move(dev, slave, RES_MPT, id);
2611 return 0;
2612
2613ex_abort:
2614 res_abort_move(dev, slave, RES_MPT, id);
2615
2616 return err;
2617}
2618
2619int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2620 struct mlx4_vhcr *vhcr,
2621 struct mlx4_cmd_mailbox *inbox,
2622 struct mlx4_cmd_mailbox *outbox,
2623 struct mlx4_cmd_info *cmd)
2624{
2625 int err;
2626 int index = vhcr->in_modifier;
2627 struct res_mpt *mpt;
2628 int id;
2629
2630 id = index & mpt_mask(dev);
2631 err = get_res(dev, slave, id, RES_MPT, &mpt);
2632 if (err)
2633 return err;
2634
e630664c
MB
2635 if (mpt->com.from_state == RES_MPT_MAPPED) {
2636 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2637 * that, the VF must read the MPT. But since the MPT entry memory is not
2638 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2639 * entry contents. To guarantee that the MPT cannot be changed, the driver
2640 * must perform HW2SW_MPT before this query and return the MPT entry to HW
 2641 * ownership following the change. The change here allows the VF to
2642 * perform QUERY_MPT also when the entry is in SW ownership.
2643 */
2644 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2645 &mlx4_priv(dev)->mr_table.dmpt_table,
2646 mpt->key, NULL);
2647
2648 if (NULL == mpt_entry || NULL == outbox->buf) {
2649 err = -EINVAL;
2650 goto out;
2651 }
2652
2653 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2654
2655 err = 0;
2656 } else if (mpt->com.from_state == RES_MPT_HW) {
2657 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2658 } else {
c82e9aa0
EC
2659 err = -EBUSY;
2660 goto out;
2661 }
2662
c82e9aa0
EC
2663
2664out:
2665 put_res(dev, slave, id, RES_MPT);
2666 return err;
2667}
2668
2669static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2670{
2671 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2672}
2673
2674static int qp_get_scqn(struct mlx4_qp_context *qpc)
2675{
2676 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2677}
2678
2679static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2680{
2681 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2682}
2683
54679e14
JM
2684static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2685 struct mlx4_qp_context *context)
2686{
2687 u32 qpn = vhcr->in_modifier & 0xffffff;
2688 u32 qkey = 0;
2689
2690 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2691 return;
2692
2693 /* adjust qkey in qp context */
2694 context->qkey = cpu_to_be32(qkey);
2695}
2696
c82e9aa0
EC
2697int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2698 struct mlx4_vhcr *vhcr,
2699 struct mlx4_cmd_mailbox *inbox,
2700 struct mlx4_cmd_mailbox *outbox,
2701 struct mlx4_cmd_info *cmd)
2702{
2703 int err;
2704 int qpn = vhcr->in_modifier & 0x7fffff;
2705 struct res_mtt *mtt;
2706 struct res_qp *qp;
2707 struct mlx4_qp_context *qpc = inbox->buf + 8;
2b8fb286 2708 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2709 int mtt_size = qp_get_mtt_size(qpc);
2710 struct res_cq *rcq;
2711 struct res_cq *scq;
2712 int rcqn = qp_get_rcqn(qpc);
2713 int scqn = qp_get_scqn(qpc);
2714 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2715 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2716 struct res_srq *srq;
2717 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2718
2719 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2720 if (err)
2721 return err;
2722 qp->local_qpn = local_qpn;
b01978ca 2723 qp->sched_queue = 0;
f0f829bf
RE
2724 qp->param3 = 0;
2725 qp->vlan_control = 0;
2726 qp->fvl_rx = 0;
2727 qp->pri_path_fl = 0;
2728 qp->vlan_index = 0;
2729 qp->feup = 0;
b01978ca 2730 qp->qpc_flags = be32_to_cpu(qpc->flags);
c82e9aa0 2731
2b8fb286 2732 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2733 if (err)
2734 goto ex_abort;
2735
2736 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2737 if (err)
2738 goto ex_put_mtt;
2739
c82e9aa0
EC
2740 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2741 if (err)
2742 goto ex_put_mtt;
2743
2744 if (scqn != rcqn) {
2745 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2746 if (err)
2747 goto ex_put_rcq;
2748 } else
2749 scq = rcq;
2750
2751 if (use_srq) {
2752 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2753 if (err)
2754 goto ex_put_scq;
2755 }
2756
54679e14
JM
2757 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2758 update_pkey_index(dev, slave, inbox);
c82e9aa0
EC
2759 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2760 if (err)
2761 goto ex_put_srq;
2762 atomic_inc(&mtt->ref_count);
2763 qp->mtt = mtt;
2764 atomic_inc(&rcq->ref_count);
2765 qp->rcq = rcq;
2766 atomic_inc(&scq->ref_count);
2767 qp->scq = scq;
2768
2769 if (scqn != rcqn)
2770 put_res(dev, slave, scqn, RES_CQ);
2771
2772 if (use_srq) {
2773 atomic_inc(&srq->ref_count);
2774 put_res(dev, slave, srqn, RES_SRQ);
2775 qp->srq = srq;
2776 }
2777 put_res(dev, slave, rcqn, RES_CQ);
2b8fb286 2778 put_res(dev, slave, mtt_base, RES_MTT);
c82e9aa0
EC
2779 res_end_move(dev, slave, RES_QP, qpn);
2780
2781 return 0;
2782
2783ex_put_srq:
2784 if (use_srq)
2785 put_res(dev, slave, srqn, RES_SRQ);
2786ex_put_scq:
2787 if (scqn != rcqn)
2788 put_res(dev, slave, scqn, RES_CQ);
2789ex_put_rcq:
2790 put_res(dev, slave, rcqn, RES_CQ);
2791ex_put_mtt:
2b8fb286 2792 put_res(dev, slave, mtt_base, RES_MTT);
c82e9aa0
EC
2793ex_abort:
2794 res_abort_move(dev, slave, RES_QP, qpn);
2795
2796 return err;
2797}
2798
2b8fb286 2799static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
c82e9aa0
EC
2800{
2801 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2802}
2803
2804static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2805{
2806 int log_eq_size = eqc->log_eq_size & 0x1f;
2807 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2808
2809 if (log_eq_size + 5 < page_shift)
2810 return 1;
2811
2812 return 1 << (log_eq_size + 5 - page_shift);
2813}
2814
2b8fb286 2815static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
c82e9aa0
EC
2816{
2817 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2818}
2819
2820static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2821{
2822 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2823 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2824
2825 if (log_cq_size + 5 < page_shift)
2826 return 1;
2827
2828 return 1 << (log_cq_size + 5 - page_shift);
2829}
2830
2831int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2832 struct mlx4_vhcr *vhcr,
2833 struct mlx4_cmd_mailbox *inbox,
2834 struct mlx4_cmd_mailbox *outbox,
2835 struct mlx4_cmd_info *cmd)
2836{
2837 int err;
2838 int eqn = vhcr->in_modifier;
2839 int res_id = (slave << 8) | eqn;
2840 struct mlx4_eq_context *eqc = inbox->buf;
2b8fb286 2841 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2842 int mtt_size = eq_get_mtt_size(eqc);
2843 struct res_eq *eq;
2844 struct res_mtt *mtt;
2845
2846 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2847 if (err)
2848 return err;
2849 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2850 if (err)
2851 goto out_add;
2852
2b8fb286 2853 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2854 if (err)
2855 goto out_move;
2856
2857 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2858 if (err)
2859 goto out_put;
2860
2861 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2862 if (err)
2863 goto out_put;
2864
2865 atomic_inc(&mtt->ref_count);
2866 eq->mtt = mtt;
2867 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2868 res_end_move(dev, slave, RES_EQ, res_id);
2869 return 0;
2870
2871out_put:
2872 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2873out_move:
2874 res_abort_move(dev, slave, RES_EQ, res_id);
2875out_add:
2876 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2877 return err;
2878}
2879
d475c95b
MB
2880int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2881 struct mlx4_vhcr *vhcr,
2882 struct mlx4_cmd_mailbox *inbox,
2883 struct mlx4_cmd_mailbox *outbox,
2884 struct mlx4_cmd_info *cmd)
2885{
2886 int err;
2887 u8 get = vhcr->op_modifier;
2888
2889 if (get != 1)
2890 return -EPERM;
2891
2892 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2893
2894 return err;
2895}
2896
c82e9aa0
EC
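/*
 * Look up the MTT range owned by this slave that fully contains
 * [start, start + len) and mark it busy so it cannot be freed while
 * the caller (WRITE_MTT below) is still writing into it.
 */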
2897static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2898 int len, struct res_mtt **res)
2899{
2900 struct mlx4_priv *priv = mlx4_priv(dev);
2901 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2902 struct res_mtt *mtt;
2903 int err = -EINVAL;
2904
2905 spin_lock_irq(mlx4_tlock(dev));
2906 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2907 com.list) {
2908 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2909 *res = mtt;
2910 mtt->com.from_state = mtt->com.state;
2911 mtt->com.state = RES_MTT_BUSY;
2912 err = 0;
2913 break;
2914 }
2915 }
2916 spin_unlock_irq(mlx4_tlock(dev));
2917
2918 return err;
2919}
2920
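/*
 * Sanity checks on QP state transitions requested by a slave: for
 * RC/UC/XRC QPs the mgid_index in the primary or alternate path must
 * fall within the GIDs assigned to that slave, and MLX (special) QPs
 * on reserved QPNs may only be brought to RTR by VFs with SMI enabled.
 */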
54679e14 2921static int verify_qp_parameters(struct mlx4_dev *dev,
99ec41d0 2922 struct mlx4_vhcr *vhcr,
54679e14
JM
2923 struct mlx4_cmd_mailbox *inbox,
2924 enum qp_transition transition, u8 slave)
2925{
2926 u32 qp_type;
99ec41d0 2927 u32 qpn;
54679e14
JM
2928 struct mlx4_qp_context *qp_ctx;
2929 enum mlx4_qp_optpar optpar;
b6ffaeff
JM
2930 int port;
2931 int num_gids;
54679e14
JM
2932
2933 qp_ctx = inbox->buf + 8;
2934 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2935 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2936
2937 switch (qp_type) {
2938 case MLX4_QP_ST_RC:
b6ffaeff 2939 case MLX4_QP_ST_XRC:
54679e14
JM
2940 case MLX4_QP_ST_UC:
2941 switch (transition) {
2942 case QP_TRANS_INIT2RTR:
2943 case QP_TRANS_RTR2RTS:
2944 case QP_TRANS_RTS2RTS:
2945 case QP_TRANS_SQD2SQD:
2946 case QP_TRANS_SQD2RTS:
2947 if (slave != mlx4_master_func_num(dev))
b6ffaeff
JM
2948 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2949 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2950 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
449fc488 2951 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
b6ffaeff
JM
2952 else
2953 num_gids = 1;
2954 if (qp_ctx->pri_path.mgid_index >= num_gids)
54679e14 2955 return -EINVAL;
b6ffaeff
JM
2956 }
2957 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2958 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2959 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
449fc488 2960 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
b6ffaeff
JM
2961 else
2962 num_gids = 1;
2963 if (qp_ctx->alt_path.mgid_index >= num_gids)
54679e14 2964 return -EINVAL;
b6ffaeff 2965 }
54679e14
JM
2966 break;
2967 default:
2968 break;
2969 }
165cb465 2970 break;
54679e14 2971
165cb465
RD
2972 case MLX4_QP_ST_MLX:
2973 qpn = vhcr->in_modifier & 0x7fffff;
2974 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2975 if (transition == QP_TRANS_INIT2RTR &&
2976 slave != mlx4_master_func_num(dev) &&
2977 mlx4_is_qp_reserved(dev, qpn) &&
2978 !mlx4_vf_smi_enabled(dev, slave, port)) {
2979 /* only enabled VFs may create MLX proxy QPs */
2980 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2981 __func__, slave, port);
2982 return -EPERM;
2983 }
54679e14 2984 break;
165cb465 2985
54679e14
JM
2986 default:
2987 break;
2988 }
2989
2990 return 0;
2991}
2992
c82e9aa0
EC
2993int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2994 struct mlx4_vhcr *vhcr,
2995 struct mlx4_cmd_mailbox *inbox,
2996 struct mlx4_cmd_mailbox *outbox,
2997 struct mlx4_cmd_info *cmd)
2998{
2999 struct mlx4_mtt mtt;
3000 __be64 *page_list = inbox->buf;
3001 u64 *pg_list = (u64 *)page_list;
3002 int i;
3003 struct res_mtt *rmtt = NULL;
3004 int start = be64_to_cpu(page_list[0]);
3005 int npages = vhcr->in_modifier;
3006 int err;
3007
3008 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3009 if (err)
3010 return err;
3011
3012 /* Call the SW implementation of write_mtt:
3013 * - Prepare a dummy mtt struct
 3014 * - Translate inbox contents to simple addresses in host endianness */
2b8fb286
MA
3015 mtt.offset = 0; /* TBD this is broken but I don't handle it since
3016 we don't really use it */
c82e9aa0
EC
3017 mtt.order = 0;
3018 mtt.page_shift = 0;
3019 for (i = 0; i < npages; ++i)
3020 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3021
3022 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3023 ((u64 *)page_list + 2));
3024
3025 if (rmtt)
3026 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3027
3028 return err;
3029}
3030
3031int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3032 struct mlx4_vhcr *vhcr,
3033 struct mlx4_cmd_mailbox *inbox,
3034 struct mlx4_cmd_mailbox *outbox,
3035 struct mlx4_cmd_info *cmd)
3036{
3037 int eqn = vhcr->in_modifier;
3038 int res_id = eqn | (slave << 8);
3039 struct res_eq *eq;
3040 int err;
3041
3042 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3043 if (err)
3044 return err;
3045
3046 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3047 if (err)
3048 goto ex_abort;
3049
3050 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3051 if (err)
3052 goto ex_put;
3053
3054 atomic_dec(&eq->mtt->ref_count);
3055 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3056 res_end_move(dev, slave, RES_EQ, res_id);
3057 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3058
3059 return 0;
3060
3061ex_put:
3062 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3063ex_abort:
3064 res_abort_move(dev, slave, RES_EQ, res_id);
3065
3066 return err;
3067}
3068
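/*
 * Deliver an event to a slave by replaying the EQE through the GEN_EQE
 * firmware command on the event queue the slave registered for this
 * event type; command-interface events get a fresh token first.
 */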
3069int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3070{
3071 struct mlx4_priv *priv = mlx4_priv(dev);
3072 struct mlx4_slave_event_eq_info *event_eq;
3073 struct mlx4_cmd_mailbox *mailbox;
3074 u32 in_modifier = 0;
3075 int err;
3076 int res_id;
3077 struct res_eq *req;
3078
3079 if (!priv->mfunc.master.slave_state)
3080 return -EINVAL;
3081
803143fb 3082 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
c82e9aa0
EC
3083
3084 /* Create the event only if the slave is registered */
803143fb 3085 if (event_eq->eqn < 0)
c82e9aa0
EC
3086 return 0;
3087
3088 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3089 res_id = (slave << 8) | event_eq->eqn;
3090 err = get_res(dev, slave, res_id, RES_EQ, &req);
3091 if (err)
3092 goto unlock;
3093
3094 if (req->com.from_state != RES_EQ_HW) {
3095 err = -EINVAL;
3096 goto put;
3097 }
3098
3099 mailbox = mlx4_alloc_cmd_mailbox(dev);
3100 if (IS_ERR(mailbox)) {
3101 err = PTR_ERR(mailbox);
3102 goto put;
3103 }
3104
3105 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3106 ++event_eq->token;
3107 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3108 }
3109
3110 memcpy(mailbox->buf, (u8 *) eqe, 28);
3111
3112 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3113
3114 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3115 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3116 MLX4_CMD_NATIVE);
3117
3118 put_res(dev, slave, res_id, RES_EQ);
3119 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3120 mlx4_free_cmd_mailbox(dev, mailbox);
3121 return err;
3122
3123put:
3124 put_res(dev, slave, res_id, RES_EQ);
3125
3126unlock:
3127 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3128 return err;
3129}
3130
3131int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3132 struct mlx4_vhcr *vhcr,
3133 struct mlx4_cmd_mailbox *inbox,
3134 struct mlx4_cmd_mailbox *outbox,
3135 struct mlx4_cmd_info *cmd)
3136{
3137 int eqn = vhcr->in_modifier;
3138 int res_id = eqn | (slave << 8);
3139 struct res_eq *eq;
3140 int err;
3141
3142 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3143 if (err)
3144 return err;
3145
3146 if (eq->com.from_state != RES_EQ_HW) {
3147 err = -EINVAL;
3148 goto ex_put;
3149 }
3150
3151 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3152
3153ex_put:
3154 put_res(dev, slave, res_id, RES_EQ);
3155 return err;
3156}
3157
3158int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3159 struct mlx4_vhcr *vhcr,
3160 struct mlx4_cmd_mailbox *inbox,
3161 struct mlx4_cmd_mailbox *outbox,
3162 struct mlx4_cmd_info *cmd)
3163{
3164 int err;
3165 int cqn = vhcr->in_modifier;
3166 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 3167 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
3168 struct res_cq *cq;
3169 struct res_mtt *mtt;
3170
3171 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3172 if (err)
3173 return err;
2b8fb286 3174 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
3175 if (err)
3176 goto out_move;
3177 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3178 if (err)
3179 goto out_put;
3180 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3181 if (err)
3182 goto out_put;
3183 atomic_inc(&mtt->ref_count);
3184 cq->mtt = mtt;
3185 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3186 res_end_move(dev, slave, RES_CQ, cqn);
3187 return 0;
3188
3189out_put:
3190 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3191out_move:
3192 res_abort_move(dev, slave, RES_CQ, cqn);
3193 return err;
3194}
3195
3196int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3197 struct mlx4_vhcr *vhcr,
3198 struct mlx4_cmd_mailbox *inbox,
3199 struct mlx4_cmd_mailbox *outbox,
3200 struct mlx4_cmd_info *cmd)
3201{
3202 int err;
3203 int cqn = vhcr->in_modifier;
3204 struct res_cq *cq;
3205
3206 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3207 if (err)
3208 return err;
3209 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3210 if (err)
3211 goto out_move;
3212 atomic_dec(&cq->mtt->ref_count);
3213 res_end_move(dev, slave, RES_CQ, cqn);
3214 return 0;
3215
3216out_move:
3217 res_abort_move(dev, slave, RES_CQ, cqn);
3218 return err;
3219}
3220
3221int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3222 struct mlx4_vhcr *vhcr,
3223 struct mlx4_cmd_mailbox *inbox,
3224 struct mlx4_cmd_mailbox *outbox,
3225 struct mlx4_cmd_info *cmd)
3226{
3227 int cqn = vhcr->in_modifier;
3228 struct res_cq *cq;
3229 int err;
3230
3231 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3232 if (err)
3233 return err;
3234
3235 if (cq->com.from_state != RES_CQ_HW)
3236 goto ex_put;
3237
3238 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3239ex_put:
3240 put_res(dev, slave, cqn, RES_CQ);
3241
3242 return err;
3243}
3244
3245static int handle_resize(struct mlx4_dev *dev, int slave,
3246 struct mlx4_vhcr *vhcr,
3247 struct mlx4_cmd_mailbox *inbox,
3248 struct mlx4_cmd_mailbox *outbox,
3249 struct mlx4_cmd_info *cmd,
3250 struct res_cq *cq)
3251{
3252 int err;
3253 struct res_mtt *orig_mtt;
3254 struct res_mtt *mtt;
3255 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 3256 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
3257
3258 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3259 if (err)
3260 return err;
3261
3262 if (orig_mtt != cq->mtt) {
3263 err = -EINVAL;
3264 goto ex_put;
3265 }
3266
2b8fb286 3267 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
3268 if (err)
3269 goto ex_put;
3270
3271 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3272 if (err)
3273 goto ex_put1;
3274 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3275 if (err)
3276 goto ex_put1;
3277 atomic_dec(&orig_mtt->ref_count);
3278 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3279 atomic_inc(&mtt->ref_count);
3280 cq->mtt = mtt;
3281 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3282 return 0;
3283
3284ex_put1:
3285 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3286ex_put:
3287 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3288
3289 return err;
3290
3291}
3292
3293int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3294 struct mlx4_vhcr *vhcr,
3295 struct mlx4_cmd_mailbox *inbox,
3296 struct mlx4_cmd_mailbox *outbox,
3297 struct mlx4_cmd_info *cmd)
3298{
3299 int cqn = vhcr->in_modifier;
3300 struct res_cq *cq;
3301 int err;
3302
3303 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3304 if (err)
3305 return err;
3306
3307 if (cq->com.from_state != RES_CQ_HW)
3308 goto ex_put;
3309
3310 if (vhcr->op_modifier == 0) {
3311 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
dcf353b1 3312 goto ex_put;
c82e9aa0
EC
3313 }
3314
3315 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3316ex_put:
3317 put_res(dev, slave, cqn, RES_CQ);
3318
3319 return err;
3320}
3321
c82e9aa0
EC
3322static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3323{
3324 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3325 int log_rq_stride = srqc->logstride & 7;
3326 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3327
3328 if (log_srq_size + log_rq_stride + 4 < page_shift)
3329 return 1;
3330
3331 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3332}
3333
3334int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3335 struct mlx4_vhcr *vhcr,
3336 struct mlx4_cmd_mailbox *inbox,
3337 struct mlx4_cmd_mailbox *outbox,
3338 struct mlx4_cmd_info *cmd)
3339{
3340 int err;
3341 int srqn = vhcr->in_modifier;
3342 struct res_mtt *mtt;
3343 struct res_srq *srq;
3344 struct mlx4_srq_context *srqc = inbox->buf;
2b8fb286 3345 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
3346
3347 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3348 return -EINVAL;
3349
3350 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3351 if (err)
3352 return err;
2b8fb286 3353 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
3354 if (err)
3355 goto ex_abort;
3356 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3357 mtt);
3358 if (err)
3359 goto ex_put_mtt;
3360
c82e9aa0
EC
3361 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3362 if (err)
3363 goto ex_put_mtt;
3364
3365 atomic_inc(&mtt->ref_count);
3366 srq->mtt = mtt;
3367 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3368 res_end_move(dev, slave, RES_SRQ, srqn);
3369 return 0;
3370
3371ex_put_mtt:
3372 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3373ex_abort:
3374 res_abort_move(dev, slave, RES_SRQ, srqn);
3375
3376 return err;
3377}
3378
3379int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3380 struct mlx4_vhcr *vhcr,
3381 struct mlx4_cmd_mailbox *inbox,
3382 struct mlx4_cmd_mailbox *outbox,
3383 struct mlx4_cmd_info *cmd)
3384{
3385 int err;
3386 int srqn = vhcr->in_modifier;
3387 struct res_srq *srq;
3388
3389 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3390 if (err)
3391 return err;
3392 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3393 if (err)
3394 goto ex_abort;
3395 atomic_dec(&srq->mtt->ref_count);
3396 if (srq->cq)
3397 atomic_dec(&srq->cq->ref_count);
3398 res_end_move(dev, slave, RES_SRQ, srqn);
3399
3400 return 0;
3401
3402ex_abort:
3403 res_abort_move(dev, slave, RES_SRQ, srqn);
3404
3405 return err;
3406}
3407
3408int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3409 struct mlx4_vhcr *vhcr,
3410 struct mlx4_cmd_mailbox *inbox,
3411 struct mlx4_cmd_mailbox *outbox,
3412 struct mlx4_cmd_info *cmd)
3413{
3414 int err;
3415 int srqn = vhcr->in_modifier;
3416 struct res_srq *srq;
3417
3418 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3419 if (err)
3420 return err;
3421 if (srq->com.from_state != RES_SRQ_HW) {
3422 err = -EBUSY;
3423 goto out;
3424 }
3425 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3426out:
3427 put_res(dev, slave, srqn, RES_SRQ);
3428 return err;
3429}
3430
3431int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3432 struct mlx4_vhcr *vhcr,
3433 struct mlx4_cmd_mailbox *inbox,
3434 struct mlx4_cmd_mailbox *outbox,
3435 struct mlx4_cmd_info *cmd)
3436{
3437 int err;
3438 int srqn = vhcr->in_modifier;
3439 struct res_srq *srq;
3440
3441 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3442 if (err)
3443 return err;
3444
3445 if (srq->com.from_state != RES_SRQ_HW) {
3446 err = -EBUSY;
3447 goto out;
3448 }
3449
3450 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3451out:
3452 put_res(dev, slave, srqn, RES_SRQ);
3453 return err;
3454}
3455
3456int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3457 struct mlx4_vhcr *vhcr,
3458 struct mlx4_cmd_mailbox *inbox,
3459 struct mlx4_cmd_mailbox *outbox,
3460 struct mlx4_cmd_info *cmd)
3461{
3462 int err;
3463 int qpn = vhcr->in_modifier & 0x7fffff;
3464 struct res_qp *qp;
3465
3466 err = get_res(dev, slave, qpn, RES_QP, &qp);
3467 if (err)
3468 return err;
3469 if (qp->com.from_state != RES_QP_HW) {
3470 err = -EBUSY;
3471 goto out;
3472 }
3473
3474 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3475out:
3476 put_res(dev, slave, qpn, RES_QP);
3477 return err;
3478}
3479
54679e14
JM
3480int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3481 struct mlx4_vhcr *vhcr,
3482 struct mlx4_cmd_mailbox *inbox,
3483 struct mlx4_cmd_mailbox *outbox,
3484 struct mlx4_cmd_info *cmd)
3485{
3486 struct mlx4_qp_context *context = inbox->buf + 8;
3487 adjust_proxy_tun_qkey(dev, vhcr, context);
3488 update_pkey_index(dev, slave, inbox);
3489 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3490}
3491
449fc488
MB
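/*
 * A slave's view of its port(s) may not match the physical port, so
 * translate the port bit (bit 6) of sched_queue through
 * mlx4_slave_convert_port() for the primary path (and, when its optpar
 * bit is set, the alternate path) before the context reaches firmware.
 */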
3492static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3493 struct mlx4_qp_context *qpc,
3494 struct mlx4_cmd_mailbox *inbox)
3495{
3496 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3497 u8 pri_sched_queue;
3498 int port = mlx4_slave_convert_port(
3499 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3500
3501 if (port < 0)
3502 return -EINVAL;
3503
3504 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3505 ((port & 1) << 6);
3506
3507 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3508 mlx4_is_eth(dev, port + 1)) {
3509 qpc->pri_path.sched_queue = pri_sched_queue;
3510 }
3511
3512 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3513 port = mlx4_slave_convert_port(
3514 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3515 + 1) - 1;
3516 if (port < 0)
3517 return -EINVAL;
3518 qpc->alt_path.sched_queue =
3519 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3520 (port & 1) << 6;
3521 }
3522 return 0;
3523}
3524
2f5bb473
JM
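/*
 * For Ethernet (RoCE) QPs, the source MAC index carried in the QP
 * context must refer to a MAC that was actually assigned to this slave
 * on the QP's port; otherwise the transition is rejected.
 */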
3525static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3526 struct mlx4_qp_context *qpc,
3527 struct mlx4_cmd_mailbox *inbox)
3528{
3529 u64 mac;
3530 int port;
3531 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3532 u8 sched = *(u8 *)(inbox->buf + 64);
3533 u8 smac_ix;
3534
3535 port = (sched >> 6 & 1) + 1;
3536 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3537 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3538 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3539 return -ENOENT;
3540 }
3541 return 0;
3542}
3543
c82e9aa0
EC
3544int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3545 struct mlx4_vhcr *vhcr,
3546 struct mlx4_cmd_mailbox *inbox,
3547 struct mlx4_cmd_mailbox *outbox,
3548 struct mlx4_cmd_info *cmd)
3549{
54679e14 3550 int err;
c82e9aa0 3551 struct mlx4_qp_context *qpc = inbox->buf + 8;
b01978ca
JM
3552 int qpn = vhcr->in_modifier & 0x7fffff;
3553 struct res_qp *qp;
3554 u8 orig_sched_queue;
f0f829bf
RE
3555 __be32 orig_param3 = qpc->param3;
3556 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3557 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3558 u8 orig_pri_path_fl = qpc->pri_path.fl;
3559 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3560 u8 orig_feup = qpc->pri_path.feup;
c82e9aa0 3561
449fc488
MB
3562 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3563 if (err)
3564 return err;
99ec41d0 3565 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
54679e14
JM
3566 if (err)
3567 return err;
3568
2f5bb473
JM
3569 if (roce_verify_mac(dev, slave, qpc, inbox))
3570 return -EINVAL;
3571
54679e14
JM
3572 update_pkey_index(dev, slave, inbox);
3573 update_gid(dev, inbox, (u8)slave);
3574 adjust_proxy_tun_qkey(dev, vhcr, qpc);
b01978ca
JM
3575 orig_sched_queue = qpc->pri_path.sched_queue;
3576 err = update_vport_qp_param(dev, inbox, slave, qpn);
3f7fb021
RE
3577 if (err)
3578 return err;
54679e14 3579
b01978ca
JM
3580 err = get_res(dev, slave, qpn, RES_QP, &qp);
3581 if (err)
3582 return err;
3583 if (qp->com.from_state != RES_QP_HW) {
3584 err = -EBUSY;
3585 goto out;
3586 }
3587
3588 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3589out:
3590 /* if no error, save sched queue value passed in by VF. This is
3591 * essentially the QOS value provided by the VF. This will be useful
3592 * if we allow dynamic changes from VST back to VGT
3593 */
f0f829bf 3594 if (!err) {
b01978ca 3595 qp->sched_queue = orig_sched_queue;
f0f829bf
RE
3596 qp->param3 = orig_param3;
3597 qp->vlan_control = orig_vlan_control;
3598 qp->fvl_rx = orig_fvl_rx;
3599 qp->pri_path_fl = orig_pri_path_fl;
3600 qp->vlan_index = orig_vlan_index;
3601 qp->feup = orig_feup;
3602 }
b01978ca
JM
3603 put_res(dev, slave, qpn, RES_QP);
3604 return err;
54679e14
JM
3605}
3606
3607int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3608 struct mlx4_vhcr *vhcr,
3609 struct mlx4_cmd_mailbox *inbox,
3610 struct mlx4_cmd_mailbox *outbox,
3611 struct mlx4_cmd_info *cmd)
3612{
3613 int err;
3614 struct mlx4_qp_context *context = inbox->buf + 8;
3615
449fc488
MB
3616 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3617 if (err)
3618 return err;
99ec41d0 3619 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
54679e14
JM
3620 if (err)
3621 return err;
3622
3623 update_pkey_index(dev, slave, inbox);
3624 update_gid(dev, inbox, (u8)slave);
3625 adjust_proxy_tun_qkey(dev, vhcr, context);
3626 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3627}
3628
3629int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3630 struct mlx4_vhcr *vhcr,
3631 struct mlx4_cmd_mailbox *inbox,
3632 struct mlx4_cmd_mailbox *outbox,
3633 struct mlx4_cmd_info *cmd)
3634{
3635 int err;
3636 struct mlx4_qp_context *context = inbox->buf + 8;
3637
449fc488
MB
3638 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3639 if (err)
3640 return err;
99ec41d0 3641 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
54679e14
JM
3642 if (err)
3643 return err;
3644
3645 update_pkey_index(dev, slave, inbox);
3646 update_gid(dev, inbox, (u8)slave);
3647 adjust_proxy_tun_qkey(dev, vhcr, context);
3648 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3649}
3650
3651
3652int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3653 struct mlx4_vhcr *vhcr,
3654 struct mlx4_cmd_mailbox *inbox,
3655 struct mlx4_cmd_mailbox *outbox,
3656 struct mlx4_cmd_info *cmd)
3657{
3658 struct mlx4_qp_context *context = inbox->buf + 8;
449fc488
MB
3659 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3660 if (err)
3661 return err;
54679e14
JM
3662 adjust_proxy_tun_qkey(dev, vhcr, context);
3663 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3664}
3665
3666int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3667 struct mlx4_vhcr *vhcr,
3668 struct mlx4_cmd_mailbox *inbox,
3669 struct mlx4_cmd_mailbox *outbox,
3670 struct mlx4_cmd_info *cmd)
3671{
3672 int err;
3673 struct mlx4_qp_context *context = inbox->buf + 8;
3674
449fc488
MB
3675 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3676 if (err)
3677 return err;
99ec41d0 3678 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
54679e14
JM
3679 if (err)
3680 return err;
3681
3682 adjust_proxy_tun_qkey(dev, vhcr, context);
3683 update_gid(dev, inbox, (u8)slave);
3684 update_pkey_index(dev, slave, inbox);
3685 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3686}
3687
3688int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3689 struct mlx4_vhcr *vhcr,
3690 struct mlx4_cmd_mailbox *inbox,
3691 struct mlx4_cmd_mailbox *outbox,
3692 struct mlx4_cmd_info *cmd)
3693{
3694 int err;
3695 struct mlx4_qp_context *context = inbox->buf + 8;
3696
449fc488
MB
3697 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3698 if (err)
3699 return err;
99ec41d0 3700 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
54679e14
JM
3701 if (err)
3702 return err;
c82e9aa0 3703
54679e14
JM
3704 adjust_proxy_tun_qkey(dev, vhcr, context);
3705 update_gid(dev, inbox, (u8)slave);
3706 update_pkey_index(dev, slave, inbox);
c82e9aa0
EC
3707 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3708}
3709
3710int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3711 struct mlx4_vhcr *vhcr,
3712 struct mlx4_cmd_mailbox *inbox,
3713 struct mlx4_cmd_mailbox *outbox,
3714 struct mlx4_cmd_info *cmd)
3715{
3716 int err;
3717 int qpn = vhcr->in_modifier & 0x7fffff;
3718 struct res_qp *qp;
3719
3720 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3721 if (err)
3722 return err;
3723 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3724 if (err)
3725 goto ex_abort;
3726
3727 atomic_dec(&qp->mtt->ref_count);
3728 atomic_dec(&qp->rcq->ref_count);
3729 atomic_dec(&qp->scq->ref_count);
3730 if (qp->srq)
3731 atomic_dec(&qp->srq->ref_count);
3732 res_end_move(dev, slave, RES_QP, qpn);
3733 return 0;
3734
3735ex_abort:
3736 res_abort_move(dev, slave, RES_QP, qpn);
3737
3738 return err;
3739}
3740
3741static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3742 struct res_qp *rqp, u8 *gid)
3743{
3744 struct res_gid *res;
3745
3746 list_for_each_entry(res, &rqp->mcg_list, list) {
3747 if (!memcmp(res->gid, gid, 16))
3748 return res;
3749 }
3750 return NULL;
3751}
3752
3753static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 3754 u8 *gid, enum mlx4_protocol prot,
fab1e24a 3755 enum mlx4_steer_type steer, u64 reg_id)
c82e9aa0
EC
3756{
3757 struct res_gid *res;
3758 int err;
3759
3760 res = kzalloc(sizeof *res, GFP_KERNEL);
3761 if (!res)
3762 return -ENOMEM;
3763
3764 spin_lock_irq(&rqp->mcg_spl);
3765 if (find_gid(dev, slave, rqp, gid)) {
3766 kfree(res);
3767 err = -EEXIST;
3768 } else {
3769 memcpy(res->gid, gid, 16);
3770 res->prot = prot;
9f5b6c63 3771 res->steer = steer;
fab1e24a 3772 res->reg_id = reg_id;
c82e9aa0
EC
3773 list_add_tail(&res->list, &rqp->mcg_list);
3774 err = 0;
3775 }
3776 spin_unlock_irq(&rqp->mcg_spl);
3777
3778 return err;
3779}
3780
3781static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 3782 u8 *gid, enum mlx4_protocol prot,
fab1e24a 3783 enum mlx4_steer_type steer, u64 *reg_id)
c82e9aa0
EC
3784{
3785 struct res_gid *res;
3786 int err;
3787
3788 spin_lock_irq(&rqp->mcg_spl);
3789 res = find_gid(dev, slave, rqp, gid);
9f5b6c63 3790 if (!res || res->prot != prot || res->steer != steer)
c82e9aa0
EC
3791 err = -EINVAL;
3792 else {
fab1e24a 3793 *reg_id = res->reg_id;
c82e9aa0
EC
3794 list_del(&res->list);
3795 kfree(res);
3796 err = 0;
3797 }
3798 spin_unlock_irq(&rqp->mcg_spl);
3799
3800 return err;
3801}
3802
449fc488
MB
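/*
 * Multicast attach/detach helpers that pick the steering flavour:
 * device-managed flow steering goes through the DMFS attach path and
 * yields a reg_id used later for detach, while B0 steering uses the
 * common QP attach/detach calls.  The port byte of the GID is first
 * converted to the slave's real port where needed.
 */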
3803static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3804 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
fab1e24a
HHZ
3805 enum mlx4_steer_type type, u64 *reg_id)
3806{
3807 switch (dev->caps.steering_mode) {
449fc488
MB
3808 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3809 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3810 if (port < 0)
3811 return port;
3812 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
fab1e24a
HHZ
3813 block_loopback, prot,
3814 reg_id);
449fc488 3815 }
fab1e24a 3816 case MLX4_STEERING_MODE_B0:
449fc488
MB
3817 if (prot == MLX4_PROT_ETH) {
3818 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3819 if (port < 0)
3820 return port;
3821 gid[5] = port;
3822 }
fab1e24a
HHZ
3823 return mlx4_qp_attach_common(dev, qp, gid,
3824 block_loopback, prot, type);
3825 default:
3826 return -EINVAL;
3827 }
3828}
3829
449fc488
MB
3830static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3831 u8 gid[16], enum mlx4_protocol prot,
3832 enum mlx4_steer_type type, u64 reg_id)
fab1e24a
HHZ
3833{
3834 switch (dev->caps.steering_mode) {
3835 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3836 return mlx4_flow_detach(dev, reg_id);
3837 case MLX4_STEERING_MODE_B0:
3838 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3839 default:
3840 return -EINVAL;
3841 }
3842}
3843
531d9014
JM
3844static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3845 u8 *gid, enum mlx4_protocol prot)
3846{
3847 int real_port;
3848
3849 if (prot != MLX4_PROT_ETH)
3850 return 0;
3851
3852 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3853 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3854 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3855 if (real_port < 0)
3856 return -EINVAL;
3857 gid[5] = real_port;
3858 }
3859
3860 return 0;
3861}
3862
c82e9aa0
EC
3863int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3864 struct mlx4_vhcr *vhcr,
3865 struct mlx4_cmd_mailbox *inbox,
3866 struct mlx4_cmd_mailbox *outbox,
3867 struct mlx4_cmd_info *cmd)
3868{
3869 struct mlx4_qp qp; /* dummy for calling attach/detach */
3870 u8 *gid = inbox->buf;
3871 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
162344ed 3872 int err;
c82e9aa0
EC
3873 int qpn;
3874 struct res_qp *rqp;
fab1e24a 3875 u64 reg_id = 0;
c82e9aa0
EC
3876 int attach = vhcr->op_modifier;
3877 int block_loopback = vhcr->in_modifier >> 31;
3878 u8 steer_type_mask = 2;
75c6062c 3879 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
c82e9aa0
EC
3880
3881 qpn = vhcr->in_modifier & 0xffffff;
3882 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3883 if (err)
3884 return err;
3885
3886 qp.qpn = qpn;
3887 if (attach) {
449fc488 3888 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
fab1e24a
HHZ
3889 type, &reg_id);
3890 if (err) {
 3891 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
c82e9aa0 3892 goto ex_put;
fab1e24a
HHZ
3893 }
3894 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
c82e9aa0 3895 if (err)
fab1e24a 3896 goto ex_detach;
c82e9aa0 3897 } else {
531d9014
JM
3898 err = mlx4_adjust_port(dev, slave, gid, prot);
3899 if (err)
3900 goto ex_put;
3901
fab1e24a 3902 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
c82e9aa0
EC
3903 if (err)
3904 goto ex_put;
c82e9aa0 3905
fab1e24a
HHZ
3906 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3907 if (err)
 3908 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3909 qpn, reg_id);
3910 }
c82e9aa0 3911 put_res(dev, slave, qpn, RES_QP);
fab1e24a 3912 return err;
c82e9aa0 3913
fab1e24a
HHZ
3914ex_detach:
3915 qp_detach(dev, &qp, gid, prot, type, reg_id);
c82e9aa0
EC
3916ex_put:
3917 put_res(dev, slave, qpn, RES_QP);
c82e9aa0
EC
3918 return err;
3919}
3920
7fb40f87
HHZ
3921/*
3922 * MAC validation for Flow Steering rules.
 3923 * A VF can attach rules only with a MAC address that is assigned to it.
3924 */
3925static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3926 struct list_head *rlist)
3927{
3928 struct mac_res *res, *tmp;
3929 __be64 be_mac;
3930
 3931 /* make sure it isn't a multicast or broadcast mac */
3932 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3933 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3934 list_for_each_entry_safe(res, tmp, rlist, list) {
3935 be_mac = cpu_to_be64(res->mac << 16);
c0623e58 3936 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
7fb40f87
HHZ
3937 return 0;
3938 }
3939 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3940 eth_header->eth.dst_mac, slave);
3941 return -EINVAL;
3942 }
3943 return 0;
3944}
3945
3946/*
3947 * In case of missing eth header, append eth header with a MAC address
3948 * assigned to the VF.
3949 */
3950static int add_eth_header(struct mlx4_dev *dev, int slave,
3951 struct mlx4_cmd_mailbox *inbox,
3952 struct list_head *rlist, int header_id)
3953{
3954 struct mac_res *res, *tmp;
3955 u8 port;
3956 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3957 struct mlx4_net_trans_rule_hw_eth *eth_header;
3958 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3959 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3960 __be64 be_mac = 0;
3961 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3962
3963 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
015465f8 3964 port = ctrl->port;
7fb40f87
HHZ
3965 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3966
3967 /* Clear a space in the inbox for eth header */
3968 switch (header_id) {
3969 case MLX4_NET_TRANS_RULE_ID_IPV4:
3970 ip_header =
3971 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3972 memmove(ip_header, eth_header,
3973 sizeof(*ip_header) + sizeof(*l4_header));
3974 break;
3975 case MLX4_NET_TRANS_RULE_ID_TCP:
3976 case MLX4_NET_TRANS_RULE_ID_UDP:
3977 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3978 (eth_header + 1);
3979 memmove(l4_header, eth_header, sizeof(*l4_header));
3980 break;
3981 default:
3982 return -EINVAL;
3983 }
3984 list_for_each_entry_safe(res, tmp, rlist, list) {
3985 if (port == res->port) {
3986 be_mac = cpu_to_be64(res->mac << 16);
3987 break;
3988 }
3989 }
3990 if (!be_mac) {
1a91de28 3991 pr_err("Failed adding eth header to FS rule: can't find matching MAC for port %d\n",
7fb40f87
HHZ
3992 port);
3993 return -EINVAL;
3994 }
3995
3996 memset(eth_header, 0, sizeof(*eth_header));
3997 eth_header->size = sizeof(*eth_header) >> 2;
3998 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3999 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4000 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4001
4002 return 0;
4003
4004}
4005
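/*
 * Wrapper for UPDATE_QP issued by a slave.  Only a change of the primary
 * path MAC index is permitted (see MLX4_UPD_QP_PATH_MASK_SUPPORTED); any
 * other qp_mask or address-path mask bit is rejected with -EPERM.  The
 * requested smac index is checked against the MACs registered to the slave
 * on that port before the command is forwarded to the firmware.
 */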
ce8d9e0d
MB
4006#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4007int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4008 struct mlx4_vhcr *vhcr,
4009 struct mlx4_cmd_mailbox *inbox,
4010 struct mlx4_cmd_mailbox *outbox,
4011 struct mlx4_cmd_info *cmd_info)
4012{
4013 int err;
4014 u32 qpn = vhcr->in_modifier & 0xffffff;
4015 struct res_qp *rqp;
4016 u64 mac;
4017 unsigned port;
4018 u64 pri_addr_path_mask;
4019 struct mlx4_update_qp_context *cmd;
4020 int smac_index;
4021
4022 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4023
4024 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4025 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4026 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4027 return -EPERM;
4028
4029 /* Just change the smac for the QP */
4030 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4031 if (err) {
4032 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4033 return err;
4034 }
4035
4036 port = (rqp->sched_queue >> 6 & 1) + 1;
b7834758
MB
4037
4038 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4039 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4040 err = mac_find_smac_ix_in_slave(dev, slave, port,
4041 smac_index, &mac);
4042
4043 if (err) {
4044 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4045 qpn, smac_index);
4046 goto err_mac;
4047 }
ce8d9e0d
MB
4048 }
4049
4050 err = mlx4_cmd(dev, inbox->dma,
4051 vhcr->in_modifier, 0,
4052 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4053 MLX4_CMD_NATIVE);
4054 if (err) {
4055 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4056 goto err_mac;
4057 }
4058
4059err_mac:
4060 put_res(dev, slave, qpn, RES_QP);
4061 return err;
4062}
4063
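/*
 * Wrapper for attaching a device-managed flow steering rule on behalf of a
 * slave.  The target QP must be owned by the slave, an Ethernet rule must
 * use a MAC that belongs to the slave, and an L3/L4 rule without an L2
 * header gets one inserted with the slave's MAC for that port.  On success
 * the returned rule id is registered in the tracker (RES_FS_RULE) and the
 * QP's reference count is raised.
 */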
8fcfb4db
HHZ
4064int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4065 struct mlx4_vhcr *vhcr,
4066 struct mlx4_cmd_mailbox *inbox,
4067 struct mlx4_cmd_mailbox *outbox,
4068 struct mlx4_cmd_info *cmd)
4069{
7fb40f87
HHZ
4070
4071 struct mlx4_priv *priv = mlx4_priv(dev);
4072 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4073 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
1b9c6b06 4074 int err;
a9c01e7a 4075 int qpn;
2c473ae7 4076 struct res_qp *rqp;
7fb40f87
HHZ
4077 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4078 struct _rule_hw *rule_header;
4079 int header_id;
1b9c6b06 4080
0ff1fb65
HHZ
4081 if (dev->caps.steering_mode !=
4082 MLX4_STEERING_MODE_DEVICE_MANAGED)
4083 return -EOPNOTSUPP;
1b9c6b06 4084
7fb40f87 4085 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
449fc488
MB
4086 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4087 if (ctrl->port <= 0)
4088 return -EINVAL;
a9c01e7a 4089 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
2c473ae7 4090 err = get_res(dev, slave, qpn, RES_QP, &rqp);
a9c01e7a 4091 if (err) {
1a91de28 4092 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
a9c01e7a
HHZ
4093 return err;
4094 }
7fb40f87
HHZ
4095 rule_header = (struct _rule_hw *)(ctrl + 1);
4096 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4097
4098 switch (header_id) {
4099 case MLX4_NET_TRANS_RULE_ID_ETH:
a9c01e7a
HHZ
4100 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4101 err = -EINVAL;
4102 goto err_put;
4103 }
7fb40f87 4104 break;
60396683
JM
4105 case MLX4_NET_TRANS_RULE_ID_IB:
4106 break;
7fb40f87
HHZ
4107 case MLX4_NET_TRANS_RULE_ID_IPV4:
4108 case MLX4_NET_TRANS_RULE_ID_TCP:
4109 case MLX4_NET_TRANS_RULE_ID_UDP:
1a91de28 4110 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
a9c01e7a
HHZ
4111 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4112 err = -EINVAL;
4113 goto err_put;
4114 }
7fb40f87
HHZ
4115 vhcr->in_modifier +=
4116 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4117 break;
4118 default:
1a91de28 4119 pr_err("Corrupted mailbox\n");
a9c01e7a
HHZ
4120 err = -EINVAL;
4121 goto err_put;
7fb40f87
HHZ
4122 }
4123
1b9c6b06
HHZ
4124 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4125 vhcr->in_modifier, 0,
4126 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4127 MLX4_CMD_NATIVE);
4128 if (err)
a9c01e7a 4129 goto err_put;
1b9c6b06 4130
2c473ae7 4131 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
1b9c6b06 4132 if (err) {
1a91de28 4133 mlx4_err(dev, "Failed to add flow steering resources\n");
1b9c6b06
HHZ
4134 /* detach rule */
4135 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2065b38b 4136 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1b9c6b06 4137 MLX4_CMD_NATIVE);
2c473ae7 4138 goto err_put;
1b9c6b06 4139 }
2c473ae7 4140 atomic_inc(&rqp->ref_count);
a9c01e7a
HHZ
4141err_put:
4142 put_res(dev, slave, qpn, RES_QP);
1b9c6b06 4143 return err;
8fcfb4db
HHZ
4144}
4145
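/*
 * Wrapper for detaching a flow steering rule on behalf of a slave.  The
 * rule is looked up in the tracker to find its owning QP, removed from the
 * tracker and then detached in firmware; the QP's reference count is
 * dropped only if the firmware command succeeded.
 */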
4146int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4147 struct mlx4_vhcr *vhcr,
4148 struct mlx4_cmd_mailbox *inbox,
4149 struct mlx4_cmd_mailbox *outbox,
4150 struct mlx4_cmd_info *cmd)
4151{
1b9c6b06 4152 int err;
2c473ae7
HHZ
4153 struct res_qp *rqp;
4154 struct res_fs_rule *rrule;
1b9c6b06 4155
0ff1fb65
HHZ
4156 if (dev->caps.steering_mode !=
4157 MLX4_STEERING_MODE_DEVICE_MANAGED)
4158 return -EOPNOTSUPP;
1b9c6b06 4159
2c473ae7
HHZ
4160 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4161 if (err)
4162 return err;
4163 /* Release the rule from busy state before removal */
4164 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4165 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4166 if (err)
4167 return err;
4168
1b9c6b06
HHZ
4169 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4170 if (err) {
1a91de28 4171 mlx4_err(dev, "Failed to remove flow steering resources\n");
2c473ae7 4172 goto out;
1b9c6b06
HHZ
4173 }
4174
4175 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4176 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4177 MLX4_CMD_NATIVE);
2c473ae7
HHZ
4178 if (!err)
4179 atomic_dec(&rqp->ref_count);
4180out:
4181 put_res(dev, slave, rrule->qpn, RES_QP);
1b9c6b06 4182 return err;
8fcfb4db
HHZ
4183}
4184
c82e9aa0
EC
4185enum {
4186 BUSY_MAX_RETRIES = 10
4187};
4188
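/*
 * QUERY_IF_STAT wrapper: the counter index is taken busy (which also
 * verifies the slave owns it) around the DMA'd query, then released.
 */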
4189int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4190 struct mlx4_vhcr *vhcr,
4191 struct mlx4_cmd_mailbox *inbox,
4192 struct mlx4_cmd_mailbox *outbox,
4193 struct mlx4_cmd_info *cmd)
4194{
4195 int err;
4196 int index = vhcr->in_modifier & 0xffff;
4197
4198 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4199 if (err)
4200 return err;
4201
4202 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4203 put_res(dev, slave, index, RES_COUNTER);
4204 return err;
4205}
4206
4207static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4208{
4209 struct res_gid *rgid;
4210 struct res_gid *tmp;
c82e9aa0
EC
4211 struct mlx4_qp qp; /* dummy for calling attach/detach */
4212
4213 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
fab1e24a
HHZ
4214 switch (dev->caps.steering_mode) {
4215 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4216 mlx4_flow_detach(dev, rgid->reg_id);
4217 break;
4218 case MLX4_STEERING_MODE_B0:
4219 qp.qpn = rqp->local_qpn;
4220 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4221 rgid->prot, rgid->steer);
4222 break;
4223 }
c82e9aa0
EC
4224 list_del(&rgid->list);
4225 kfree(rgid);
4226 }
4227}
4228
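/*
 * Mark all of a slave's resources of the given type busy and flag them for
 * removal.  move_all_busy() retries for up to five seconds so that
 * in-flight commands can release the resources, then logs whatever is
 * still busy.
 */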
4229static int _move_all_busy(struct mlx4_dev *dev, int slave,
4230 enum mlx4_resource type, int print)
4231{
4232 struct mlx4_priv *priv = mlx4_priv(dev);
4233 struct mlx4_resource_tracker *tracker =
4234 &priv->mfunc.master.res_tracker;
4235 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4236 struct res_common *r;
4237 struct res_common *tmp;
4238 int busy;
4239
4240 busy = 0;
4241 spin_lock_irq(mlx4_tlock(dev));
4242 list_for_each_entry_safe(r, tmp, rlist, list) {
4243 if (r->owner == slave) {
4244 if (!r->removing) {
4245 if (r->state == RES_ANY_BUSY) {
4246 if (print)
4247 mlx4_dbg(dev,
aa1ec3dd 4248 "%s id 0x%llx is busy\n",
95646373 4249 resource_str(type),
c82e9aa0
EC
4250 r->res_id);
4251 ++busy;
4252 } else {
4253 r->from_state = r->state;
4254 r->state = RES_ANY_BUSY;
4255 r->removing = 1;
4256 }
4257 }
4258 }
4259 }
4260 spin_unlock_irq(mlx4_tlock(dev));
4261
4262 return busy;
4263}
4264
4265static int move_all_busy(struct mlx4_dev *dev, int slave,
4266 enum mlx4_resource type)
4267{
4268 unsigned long begin;
4269 int busy;
4270
4271 begin = jiffies;
4272 do {
4273 busy = _move_all_busy(dev, slave, type, 0);
4274 if (time_after(jiffies, begin + 5 * HZ))
4275 break;
4276 if (busy)
4277 cond_resched();
4278 } while (busy);
4279
4280 if (busy)
4281 busy = _move_all_busy(dev, slave, type, 1);
4282
4283 return busy;
4284}
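/*
 * The rem_slave_*() helpers below walk one resource type owned by a dying
 * slave and unwind every entry through its hardware states (e.g. a QP goes
 * RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED), issuing the necessary
 * firmware commands on the slave's behalf before removing the entry from
 * the tracker.
 */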
4285static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4286{
4287 struct mlx4_priv *priv = mlx4_priv(dev);
4288 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4289 struct list_head *qp_list =
4290 &tracker->slave_list[slave].res_list[RES_QP];
4291 struct res_qp *qp;
4292 struct res_qp *tmp;
4293 int state;
4294 u64 in_param;
4295 int qpn;
4296 int err;
4297
4298 err = move_all_busy(dev, slave, RES_QP);
4299 if (err)
1a91de28
JP
4300 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4301 slave);
c82e9aa0
EC
4302
4303 spin_lock_irq(mlx4_tlock(dev));
4304 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4305 spin_unlock_irq(mlx4_tlock(dev));
4306 if (qp->com.owner == slave) {
4307 qpn = qp->com.res_id;
4308 detach_qp(dev, slave, qp);
4309 state = qp->com.from_state;
4310 while (state != 0) {
4311 switch (state) {
4312 case RES_QP_RESERVED:
4313 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4314 rb_erase(&qp->com.node,
4315 &tracker->res_tree[RES_QP]);
c82e9aa0
EC
4316 list_del(&qp->com.list);
4317 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4318 if (!valid_reserved(dev, slave, qpn)) {
4319 __mlx4_qp_release_range(dev, qpn, 1);
4320 mlx4_release_resource(dev, slave,
4321 RES_QP, 1, 0);
4322 }
c82e9aa0
EC
4323 kfree(qp);
4324 state = 0;
4325 break;
4326 case RES_QP_MAPPED:
4327 if (!valid_reserved(dev, slave, qpn))
4328 __mlx4_qp_free_icm(dev, qpn);
4329 state = RES_QP_RESERVED;
4330 break;
4331 case RES_QP_HW:
4332 in_param = slave;
4333 err = mlx4_cmd(dev, in_param,
4334 qp->local_qpn, 2,
4335 MLX4_CMD_2RST_QP,
4336 MLX4_CMD_TIME_CLASS_A,
4337 MLX4_CMD_NATIVE);
4338 if (err)
1a91de28
JP
4339 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4340 slave, qp->local_qpn);
c82e9aa0
EC
4341 atomic_dec(&qp->rcq->ref_count);
4342 atomic_dec(&qp->scq->ref_count);
4343 atomic_dec(&qp->mtt->ref_count);
4344 if (qp->srq)
4345 atomic_dec(&qp->srq->ref_count);
4346 state = RES_QP_MAPPED;
4347 break;
4348 default:
4349 state = 0;
4350 }
4351 }
4352 }
4353 spin_lock_irq(mlx4_tlock(dev));
4354 }
4355 spin_unlock_irq(mlx4_tlock(dev));
4356}
4357
4358static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4359{
4360 struct mlx4_priv *priv = mlx4_priv(dev);
4361 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4362 struct list_head *srq_list =
4363 &tracker->slave_list[slave].res_list[RES_SRQ];
4364 struct res_srq *srq;
4365 struct res_srq *tmp;
4366 int state;
4367 u64 in_param;
4368 LIST_HEAD(tlist);
4369 int srqn;
4370 int err;
4371
4372 err = move_all_busy(dev, slave, RES_SRQ);
4373 if (err)
1a91de28
JP
4374 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4375 slave);
c82e9aa0
EC
4376
4377 spin_lock_irq(mlx4_tlock(dev));
4378 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4379 spin_unlock_irq(mlx4_tlock(dev));
4380 if (srq->com.owner == slave) {
4381 srqn = srq->com.res_id;
4382 state = srq->com.from_state;
4383 while (state != 0) {
4384 switch (state) {
4385 case RES_SRQ_ALLOCATED:
4386 __mlx4_srq_free_icm(dev, srqn);
4387 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4388 rb_erase(&srq->com.node,
4389 &tracker->res_tree[RES_SRQ]);
c82e9aa0
EC
4390 list_del(&srq->com.list);
4391 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4392 mlx4_release_resource(dev, slave,
4393 RES_SRQ, 1, 0);
c82e9aa0
EC
4394 kfree(srq);
4395 state = 0;
4396 break;
4397
4398 case RES_SRQ_HW:
4399 in_param = slave;
4400 err = mlx4_cmd(dev, in_param, srqn, 1,
4401 MLX4_CMD_HW2SW_SRQ,
4402 MLX4_CMD_TIME_CLASS_A,
4403 MLX4_CMD_NATIVE);
4404 if (err)
1a91de28 4405 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
c82e9aa0
EC
4406 slave, srqn);
4407
4408 atomic_dec(&srq->mtt->ref_count);
4409 if (srq->cq)
4410 atomic_dec(&srq->cq->ref_count);
4411 state = RES_SRQ_ALLOCATED;
4412 break;
4413
4414 default:
4415 state = 0;
4416 }
4417 }
4418 }
4419 spin_lock_irq(mlx4_tlock(dev));
4420 }
4421 spin_unlock_irq(mlx4_tlock(dev));
4422}
4423
4424static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4425{
4426 struct mlx4_priv *priv = mlx4_priv(dev);
4427 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4428 struct list_head *cq_list =
4429 &tracker->slave_list[slave].res_list[RES_CQ];
4430 struct res_cq *cq;
4431 struct res_cq *tmp;
4432 int state;
4433 u64 in_param;
4434 LIST_HEAD(tlist);
4435 int cqn;
4436 int err;
4437
4438 err = move_all_busy(dev, slave, RES_CQ);
4439 if (err)
1a91de28
JP
4440 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4441 slave);
c82e9aa0
EC
4442
4443 spin_lock_irq(mlx4_tlock(dev));
4444 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4445 spin_unlock_irq(mlx4_tlock(dev));
4446 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4447 cqn = cq->com.res_id;
4448 state = cq->com.from_state;
4449 while (state != 0) {
4450 switch (state) {
4451 case RES_CQ_ALLOCATED:
4452 __mlx4_cq_free_icm(dev, cqn);
4453 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4454 rb_erase(&cq->com.node,
4455 &tracker->res_tree[RES_CQ]);
c82e9aa0
EC
4456 list_del(&cq->com.list);
4457 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4458 mlx4_release_resource(dev, slave,
4459 RES_CQ, 1, 0);
c82e9aa0
EC
4460 kfree(cq);
4461 state = 0;
4462 break;
4463
4464 case RES_CQ_HW:
4465 in_param = slave;
4466 err = mlx4_cmd(dev, in_param, cqn, 1,
4467 MLX4_CMD_HW2SW_CQ,
4468 MLX4_CMD_TIME_CLASS_A,
4469 MLX4_CMD_NATIVE);
4470 if (err)
1a91de28 4471 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
c82e9aa0
EC
4472 slave, cqn);
4473 atomic_dec(&cq->mtt->ref_count);
4474 state = RES_CQ_ALLOCATED;
4475 break;
4476
4477 default:
4478 state = 0;
4479 }
4480 }
4481 }
4482 spin_lock_irq(mlx4_tlock(dev));
4483 }
4484 spin_unlock_irq(mlx4_tlock(dev));
4485}
4486
4487static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4488{
4489 struct mlx4_priv *priv = mlx4_priv(dev);
4490 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4491 struct list_head *mpt_list =
4492 &tracker->slave_list[slave].res_list[RES_MPT];
4493 struct res_mpt *mpt;
4494 struct res_mpt *tmp;
4495 int state;
4496 u64 in_param;
4497 LIST_HEAD(tlist);
4498 int mptn;
4499 int err;
4500
4501 err = move_all_busy(dev, slave, RES_MPT);
4502 if (err)
1a91de28
JP
4503 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4504 slave);
c82e9aa0
EC
4505
4506 spin_lock_irq(mlx4_tlock(dev));
4507 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4508 spin_unlock_irq(mlx4_tlock(dev));
4509 if (mpt->com.owner == slave) {
4510 mptn = mpt->com.res_id;
4511 state = mpt->com.from_state;
4512 while (state != 0) {
4513 switch (state) {
4514 case RES_MPT_RESERVED:
b20e519a 4515 __mlx4_mpt_release(dev, mpt->key);
c82e9aa0 4516 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4517 rb_erase(&mpt->com.node,
4518 &tracker->res_tree[RES_MPT]);
c82e9aa0
EC
4519 list_del(&mpt->com.list);
4520 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4521 mlx4_release_resource(dev, slave,
4522 RES_MPT, 1, 0);
c82e9aa0
EC
4523 kfree(mpt);
4524 state = 0;
4525 break;
4526
4527 case RES_MPT_MAPPED:
b20e519a 4528 __mlx4_mpt_free_icm(dev, mpt->key);
c82e9aa0
EC
4529 state = RES_MPT_RESERVED;
4530 break;
4531
4532 case RES_MPT_HW:
4533 in_param = slave;
4534 err = mlx4_cmd(dev, in_param, mptn, 0,
4535 MLX4_CMD_HW2SW_MPT,
4536 MLX4_CMD_TIME_CLASS_A,
4537 MLX4_CMD_NATIVE);
4538 if (err)
1a91de28 4539 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
c82e9aa0
EC
4540 slave, mptn);
4541 if (mpt->mtt)
4542 atomic_dec(&mpt->mtt->ref_count);
4543 state = RES_MPT_MAPPED;
4544 break;
4545 default:
4546 state = 0;
4547 }
4548 }
4549 }
4550 spin_lock_irq(mlx4_tlock(dev));
4551 }
4552 spin_unlock_irq(mlx4_tlock(dev));
4553}
4554
4555static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4556{
4557 struct mlx4_priv *priv = mlx4_priv(dev);
4558 struct mlx4_resource_tracker *tracker =
4559 &priv->mfunc.master.res_tracker;
4560 struct list_head *mtt_list =
4561 &tracker->slave_list[slave].res_list[RES_MTT];
4562 struct res_mtt *mtt;
4563 struct res_mtt *tmp;
4564 int state;
4565 LIST_HEAD(tlist);
4566 int base;
4567 int err;
4568
4569 err = move_all_busy(dev, slave, RES_MTT);
4570 if (err)
1a91de28
JP
4571 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4572 slave);
c82e9aa0
EC
4573
4574 spin_lock_irq(mlx4_tlock(dev));
4575 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4576 spin_unlock_irq(mlx4_tlock(dev));
4577 if (mtt->com.owner == slave) {
4578 base = mtt->com.res_id;
4579 state = mtt->com.from_state;
4580 while (state != 0) {
4581 switch (state) {
4582 case RES_MTT_ALLOCATED:
4583 __mlx4_free_mtt_range(dev, base,
4584 mtt->order);
4585 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4586 rb_erase(&mtt->com.node,
4587 &tracker->res_tree[RES_MTT]);
c82e9aa0
EC
4588 list_del(&mtt->com.list);
4589 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4590 mlx4_release_resource(dev, slave, RES_MTT,
4591 1 << mtt->order, 0);
c82e9aa0
EC
4592 kfree(mtt);
4593 state = 0;
4594 break;
4595
4596 default:
4597 state = 0;
4598 }
4599 }
4600 }
4601 spin_lock_irq(mlx4_tlock(dev));
4602 }
4603 spin_unlock_irq(mlx4_tlock(dev));
4604}
4605
1b9c6b06
HHZ
4606static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4607{
4608 struct mlx4_priv *priv = mlx4_priv(dev);
4609 struct mlx4_resource_tracker *tracker =
4610 &priv->mfunc.master.res_tracker;
4611 struct list_head *fs_rule_list =
4612 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4613 struct res_fs_rule *fs_rule;
4614 struct res_fs_rule *tmp;
4615 int state;
4616 u64 base;
4617 int err;
4618
4619 err = move_all_busy(dev, slave, RES_FS_RULE);
4620 if (err)
4621 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4622 slave);
4623
4624 spin_lock_irq(mlx4_tlock(dev));
4625 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4626 spin_unlock_irq(mlx4_tlock(dev));
4627 if (fs_rule->com.owner == slave) {
4628 base = fs_rule->com.res_id;
4629 state = fs_rule->com.from_state;
4630 while (state != 0) {
4631 switch (state) {
4632 case RES_FS_RULE_ALLOCATED:
4633 /* detach rule */
4634 err = mlx4_cmd(dev, base, 0, 0,
4635 MLX4_QP_FLOW_STEERING_DETACH,
4636 MLX4_CMD_TIME_CLASS_A,
4637 MLX4_CMD_NATIVE);
4638
4639 spin_lock_irq(mlx4_tlock(dev));
4640 rb_erase(&fs_rule->com.node,
4641 &tracker->res_tree[RES_FS_RULE]);
4642 list_del(&fs_rule->com.list);
4643 spin_unlock_irq(mlx4_tlock(dev));
4644 kfree(fs_rule);
4645 state = 0;
4646 break;
4647
4648 default:
4649 state = 0;
4650 }
4651 }
4652 }
4653 spin_lock_irq(mlx4_tlock(dev));
4654 }
4655 spin_unlock_irq(mlx4_tlock(dev));
4656}
4657
c82e9aa0
EC
4658static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4659{
4660 struct mlx4_priv *priv = mlx4_priv(dev);
4661 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4662 struct list_head *eq_list =
4663 &tracker->slave_list[slave].res_list[RES_EQ];
4664 struct res_eq *eq;
4665 struct res_eq *tmp;
4666 int err;
4667 int state;
4668 LIST_HEAD(tlist);
4669 int eqn;
4670 struct mlx4_cmd_mailbox *mailbox;
4671
4672 err = move_all_busy(dev, slave, RES_EQ);
4673 if (err)
1a91de28
JP
4674 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4675 slave);
c82e9aa0
EC
4676
4677 spin_lock_irq(mlx4_tlock(dev));
4678 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4679 spin_unlock_irq(mlx4_tlock(dev));
4680 if (eq->com.owner == slave) {
4681 eqn = eq->com.res_id;
4682 state = eq->com.from_state;
4683 while (state != 0) {
4684 switch (state) {
4685 case RES_EQ_RESERVED:
4686 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4687 rb_erase(&eq->com.node,
4688 &tracker->res_tree[RES_EQ]);
c82e9aa0
EC
4689 list_del(&eq->com.list);
4690 spin_unlock_irq(mlx4_tlock(dev));
4691 kfree(eq);
4692 state = 0;
4693 break;
4694
4695 case RES_EQ_HW:
4696 mailbox = mlx4_alloc_cmd_mailbox(dev);
4697 if (IS_ERR(mailbox)) {
4698 cond_resched();
4699 continue;
4700 }
4701 err = mlx4_cmd_box(dev, slave, 0,
4702 eqn & 0xff, 0,
4703 MLX4_CMD_HW2SW_EQ,
4704 MLX4_CMD_TIME_CLASS_A,
4705 MLX4_CMD_NATIVE);
eb71d0d6 4706 if (err)
1a91de28
JP
4707 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4708 slave, eqn);
c82e9aa0 4709 mlx4_free_cmd_mailbox(dev, mailbox);
eb71d0d6
JM
4710 atomic_dec(&eq->mtt->ref_count);
4711 state = RES_EQ_RESERVED;
c82e9aa0
EC
4712 break;
4713
4714 default:
4715 state = 0;
4716 }
4717 }
4718 }
4719 spin_lock_irq(mlx4_tlock(dev));
4720 }
4721 spin_unlock_irq(mlx4_tlock(dev));
4722}
4723
ba062d52
JM
4724static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4725{
4726 struct mlx4_priv *priv = mlx4_priv(dev);
4727 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4728 struct list_head *counter_list =
4729 &tracker->slave_list[slave].res_list[RES_COUNTER];
4730 struct res_counter *counter;
4731 struct res_counter *tmp;
4732 int err;
4733 int index;
4734
4735 err = move_all_busy(dev, slave, RES_COUNTER);
4736 if (err)
1a91de28
JP
4737 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4738 slave);
ba062d52
JM
4739
4740 spin_lock_irq(mlx4_tlock(dev));
4741 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4742 if (counter->com.owner == slave) {
4743 index = counter->com.res_id;
4af1c048
HHZ
4744 rb_erase(&counter->com.node,
4745 &tracker->res_tree[RES_COUNTER]);
ba062d52
JM
4746 list_del(&counter->com.list);
4747 kfree(counter);
4748 __mlx4_counter_free(dev, index);
146f3ef4 4749 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
ba062d52
JM
4750 }
4751 }
4752 spin_unlock_irq(mlx4_tlock(dev));
4753}
4754
4755static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4756{
4757 struct mlx4_priv *priv = mlx4_priv(dev);
4758 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4759 struct list_head *xrcdn_list =
4760 &tracker->slave_list[slave].res_list[RES_XRCD];
4761 struct res_xrcdn *xrcd;
4762 struct res_xrcdn *tmp;
4763 int err;
4764 int xrcdn;
4765
4766 err = move_all_busy(dev, slave, RES_XRCD);
4767 if (err)
1a91de28
JP
4768 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4769 slave);
ba062d52
JM
4770
4771 spin_lock_irq(mlx4_tlock(dev));
4772 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4773 if (xrcd->com.owner == slave) {
4774 xrcdn = xrcd->com.res_id;
4af1c048 4775 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
ba062d52
JM
4776 list_del(&xrcd->com.list);
4777 kfree(xrcd);
4778 __mlx4_xrcd_free(dev, xrcdn);
4779 }
4780 }
4781 spin_unlock_irq(mlx4_tlock(dev));
4782}
4783
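/*
 * Release everything a slave still owns, under that slave's tracker mutex:
 * steering state (VLANs, MACs, flow steering rules) first, then QPs, SRQs,
 * CQs, MRs and EQs, and finally the MTTs, counters and XRC domains they
 * may still reference.
 */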
c82e9aa0
EC
4784void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4785{
4786 struct mlx4_priv *priv = mlx4_priv(dev);
111c6094 4787 mlx4_reset_roce_gids(dev, slave);
c82e9aa0 4788 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4874080d 4789 rem_slave_vlans(dev, slave);
c82e9aa0 4790 rem_slave_macs(dev, slave);
80cb0021 4791 rem_slave_fs_rule(dev, slave);
c82e9aa0
EC
4792 rem_slave_qps(dev, slave);
4793 rem_slave_srqs(dev, slave);
4794 rem_slave_cqs(dev, slave);
4795 rem_slave_mrs(dev, slave);
4796 rem_slave_eqs(dev, slave);
4797 rem_slave_mtts(dev, slave);
ba062d52
JM
4798 rem_slave_counters(dev, slave);
4799 rem_slave_xrcdns(dev, slave);
c82e9aa0
EC
4800 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4801}
b01978ca
JM
4802
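/*
 * Deferred work that pushes an administrative VLAN (VST) change to all of a
 * slave's active QPs on the given port via UPDATE_QP, skipping reserved and
 * RSS QPs.  Switching back to VGT restores the per-QP values the tracker
 * saved (param3, vlan_control, fvl_rx, vlan_index, fl, feup, sched_queue);
 * otherwise the new vlan index, priority and vlan_control policy are forced
 * into the QP's primary path.  On success the previously registered VLAN,
 * if any, is unregistered.
 */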
4803void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4804{
4805 struct mlx4_vf_immed_vlan_work *work =
4806 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4807 struct mlx4_cmd_mailbox *mailbox;
4808 struct mlx4_update_qp_context *upd_context;
4809 struct mlx4_dev *dev = &work->priv->dev;
4810 struct mlx4_resource_tracker *tracker =
4811 &work->priv->mfunc.master.res_tracker;
4812 struct list_head *qp_list =
4813 &tracker->slave_list[work->slave].res_list[RES_QP];
4814 struct res_qp *qp;
4815 struct res_qp *tmp;
f0f829bf
RE
4816 u64 qp_path_mask_vlan_ctrl =
4817 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
b01978ca
JM
4818 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4819 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4820 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4821 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
f0f829bf
RE
4822 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4823
4824 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4825 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4826 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4827 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4828 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4829 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
b01978ca
JM
4830 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4831
4832 int err;
4833 int port, errors = 0;
4834 u8 vlan_control;
4835
4836 if (mlx4_is_slave(dev)) {
4837 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4838 work->slave);
4839 goto out;
4840 }
4841
4842 mailbox = mlx4_alloc_cmd_mailbox(dev);
4843 if (IS_ERR(mailbox))
4844 goto out;
0a6eac24
RE
4845 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4846 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4847 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4848 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4849 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4850 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4851 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4852 else if (!work->vlan_id)
b01978ca
JM
4853 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4854 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4855 else
4856 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4857 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4858 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4859
4860 upd_context = mailbox->buf;
311be98a 4861 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
b01978ca
JM
4862
4863 spin_lock_irq(mlx4_tlock(dev));
4864 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4865 spin_unlock_irq(mlx4_tlock(dev));
4866 if (qp->com.owner == work->slave) {
4867 if (qp->com.from_state != RES_QP_HW ||
4868 !qp->sched_queue || /* no INIT2RTR trans yet */
4869 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4870 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4871 spin_lock_irq(mlx4_tlock(dev));
4872 continue;
4873 }
4874 port = (qp->sched_queue >> 6 & 1) + 1;
4875 if (port != work->port) {
4876 spin_lock_irq(mlx4_tlock(dev));
4877 continue;
4878 }
f0f829bf
RE
4879 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4880 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4881 else
4882 upd_context->primary_addr_path_mask =
4883 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4884 if (work->vlan_id == MLX4_VGT) {
4885 upd_context->qp_context.param3 = qp->param3;
4886 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4887 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4888 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4889 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4890 upd_context->qp_context.pri_path.feup = qp->feup;
4891 upd_context->qp_context.pri_path.sched_queue =
4892 qp->sched_queue;
4893 } else {
4894 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4895 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4896 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4897 upd_context->qp_context.pri_path.fvl_rx =
4898 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4899 upd_context->qp_context.pri_path.fl =
4900 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4901 upd_context->qp_context.pri_path.feup =
4902 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4903 upd_context->qp_context.pri_path.sched_queue =
4904 qp->sched_queue & 0xC7;
4905 upd_context->qp_context.pri_path.sched_queue |=
4906 ((work->qos & 0x7) << 3);
4907 }
b01978ca
JM
4908
4909 err = mlx4_cmd(dev, mailbox->dma,
4910 qp->local_qpn & 0xffffff,
4911 0, MLX4_CMD_UPDATE_QP,
4912 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4913 if (err) {
1a91de28
JP
4914 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4915 work->slave, port, qp->local_qpn, err);
b01978ca
JM
4916 errors++;
4917 }
4918 }
4919 spin_lock_irq(mlx4_tlock(dev));
4920 }
4921 spin_unlock_irq(mlx4_tlock(dev));
4922 mlx4_free_cmd_mailbox(dev, mailbox);
4923
4924 if (errors)
4925 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4926 errors, work->slave, work->port);
4927
4928 /* unregister previous vlan_id if needed and we had no errors
4929 * while updating the QPs
4930 */
4931 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4932 NO_INDX != work->orig_vlan_ix)
4933 __mlx4_unregister_vlan(&work->priv->dev, work->port,
2009d005 4934 work->orig_vlan_id);
b01978ca
JM
4935out:
4936 kfree(work);
4937 return;
4938}