drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49 #include "mlx4_stats.h"
50
51 #define MLX4_MAC_VALID (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
54
55 struct mac_res {
56 struct list_head list;
57 u64 mac;
58 int ref_count;
59 u8 smac_index;
60 u8 port;
61 };
62
63 struct vlan_res {
64 struct list_head list;
65 u16 vlan;
66 int ref_count;
67 int vlan_index;
68 u8 port;
69 };
70
71 struct res_common {
72 struct list_head list;
73 struct rb_node node;
74 u64 res_id;
75 int owner;
76 int state;
77 int from_state;
78 int to_state;
79 int removing;
80 };
81
82 enum {
83 RES_ANY_BUSY = 1
84 };
85
86 struct res_gid {
87 struct list_head list;
88 u8 gid[16];
89 enum mlx4_protocol prot;
90 enum mlx4_steer_type steer;
91 u64 reg_id;
92 };
93
94 enum res_qp_states {
95 RES_QP_BUSY = RES_ANY_BUSY,
96
97 /* QP number was allocated */
98 RES_QP_RESERVED,
99
100 /* ICM memory for QP context was mapped */
101 RES_QP_MAPPED,
102
103 /* QP is in hw ownership */
104 RES_QP_HW
105 };
106
107 struct res_qp {
108 struct res_common com;
109 struct res_mtt *mtt;
110 struct res_cq *rcq;
111 struct res_cq *scq;
112 struct res_srq *srq;
113 struct list_head mcg_list;
114 spinlock_t mcg_spl;
115 int local_qpn;
116 atomic_t ref_count;
117 u32 qpc_flags;
118 /* saved qp params before VST enforcement in order to restore on VGT */
119 u8 sched_queue;
120 __be32 param3;
121 u8 vlan_control;
122 u8 fvl_rx;
123 u8 pri_path_fl;
124 u8 vlan_index;
125 u8 feup;
126 };
127
128 enum res_mtt_states {
129 RES_MTT_BUSY = RES_ANY_BUSY,
130 RES_MTT_ALLOCATED,
131 };
132
133 static inline const char *mtt_states_str(enum res_mtt_states state)
134 {
135 switch (state) {
136 case RES_MTT_BUSY: return "RES_MTT_BUSY";
137 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
138 default: return "Unknown";
139 }
140 }
141
142 struct res_mtt {
143 struct res_common com;
144 int order;
145 atomic_t ref_count;
146 };
147
148 enum res_mpt_states {
149 RES_MPT_BUSY = RES_ANY_BUSY,
150 RES_MPT_RESERVED,
151 RES_MPT_MAPPED,
152 RES_MPT_HW,
153 };
154
155 struct res_mpt {
156 struct res_common com;
157 struct res_mtt *mtt;
158 int key;
159 };
160
161 enum res_eq_states {
162 RES_EQ_BUSY = RES_ANY_BUSY,
163 RES_EQ_RESERVED,
164 RES_EQ_HW,
165 };
166
167 struct res_eq {
168 struct res_common com;
169 struct res_mtt *mtt;
170 };
171
172 enum res_cq_states {
173 RES_CQ_BUSY = RES_ANY_BUSY,
174 RES_CQ_ALLOCATED,
175 RES_CQ_HW,
176 };
177
178 struct res_cq {
179 struct res_common com;
180 struct res_mtt *mtt;
181 atomic_t ref_count;
182 };
183
184 enum res_srq_states {
185 RES_SRQ_BUSY = RES_ANY_BUSY,
186 RES_SRQ_ALLOCATED,
187 RES_SRQ_HW,
188 };
189
190 struct res_srq {
191 struct res_common com;
192 struct res_mtt *mtt;
193 struct res_cq *cq;
194 atomic_t ref_count;
195 };
196
197 enum res_counter_states {
198 RES_COUNTER_BUSY = RES_ANY_BUSY,
199 RES_COUNTER_ALLOCATED,
200 };
201
202 struct res_counter {
203 struct res_common com;
204 int port;
205 };
206
207 enum res_xrcdn_states {
208 RES_XRCD_BUSY = RES_ANY_BUSY,
209 RES_XRCD_ALLOCATED,
210 };
211
212 struct res_xrcdn {
213 struct res_common com;
214 int port;
215 };
216
217 enum res_fs_rule_states {
218 RES_FS_RULE_BUSY = RES_ANY_BUSY,
219 RES_FS_RULE_ALLOCATED,
220 };
221
222 struct res_fs_rule {
223 struct res_common com;
224 int qpn;
225 };
226
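/*
 * Tracked resources live in two structures at once: a per-type red-black
 * tree (tracker->res_tree[type], keyed by res_id) for fast lookup, and a
 * per-slave list (tracker->slave_list[slave].res_list[type]) so that
 * everything owned by a slave can be reclaimed when it resets or goes away.
 */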
227 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
228 {
229 struct rb_node *node = root->rb_node;
230
231 while (node) {
232 struct res_common *res = container_of(node, struct res_common,
233 node);
234
235 if (res_id < res->res_id)
236 node = node->rb_left;
237 else if (res_id > res->res_id)
238 node = node->rb_right;
239 else
240 return res;
241 }
242 return NULL;
243 }
244
245 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
246 {
247 struct rb_node **new = &(root->rb_node), *parent = NULL;
248
249 /* Figure out where to put new node */
250 while (*new) {
251 struct res_common *this = container_of(*new, struct res_common,
252 node);
253
254 parent = *new;
255 if (res->res_id < this->res_id)
256 new = &((*new)->rb_left);
257 else if (res->res_id > this->res_id)
258 new = &((*new)->rb_right);
259 else
260 return -EEXIST;
261 }
262
263 /* Add new node and rebalance tree. */
264 rb_link_node(&res->node, parent, new);
265 rb_insert_color(&res->node, root);
266
267 return 0;
268 }
269
270 enum qp_transition {
271 QP_TRANS_INIT2RTR,
272 QP_TRANS_RTR2RTS,
273 QP_TRANS_RTS2RTS,
274 QP_TRANS_SQERR2RTS,
275 QP_TRANS_SQD2SQD,
276 QP_TRANS_SQD2RTS
277 };
278
279 /* For Debug uses */
280 static const char *resource_str(enum mlx4_resource rt)
281 {
282 switch (rt) {
283 case RES_QP: return "RES_QP";
284 case RES_CQ: return "RES_CQ";
285 case RES_SRQ: return "RES_SRQ";
286 case RES_MPT: return "RES_MPT";
287 case RES_MTT: return "RES_MTT";
288 case RES_MAC: return "RES_MAC";
289 case RES_VLAN: return "RES_VLAN";
290 case RES_EQ: return "RES_EQ";
291 case RES_COUNTER: return "RES_COUNTER";
292 case RES_FS_RULE: return "RES_FS_RULE";
293 case RES_XRCD: return "RES_XRCD";
294 default: return "Unknown resource type !!!";
295 }
296 }
297
298 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
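/*
 * Per-slave accounting: each slave has a hard quota and a guaranteed
 * minimum per resource type (tracked per port for MACs and VLANs).
 * Allocations within the guarantee are charged to the reserved pool;
 * anything above it comes from the shared free pool and is refused if it
 * would eat into the space still set aside for other slaves' guarantees.
 */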
299 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
300 enum mlx4_resource res_type, int count,
301 int port)
302 {
303 struct mlx4_priv *priv = mlx4_priv(dev);
304 struct resource_allocator *res_alloc =
305 &priv->mfunc.master.res_tracker.res_alloc[res_type];
306 int err = -EINVAL;
307 int allocated, free, reserved, guaranteed, from_free;
308 int from_rsvd;
309
310 if (slave > dev->persist->num_vfs)
311 return -EINVAL;
312
313 spin_lock(&res_alloc->alloc_lock);
314 allocated = (port > 0) ?
315 res_alloc->allocated[(port - 1) *
316 (dev->persist->num_vfs + 1) + slave] :
317 res_alloc->allocated[slave];
318 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
319 res_alloc->res_free;
320 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
321 res_alloc->res_reserved;
322 guaranteed = res_alloc->guaranteed[slave];
323
324 if (allocated + count > res_alloc->quota[slave]) {
325 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
326 slave, port, resource_str(res_type), count,
327 allocated, res_alloc->quota[slave]);
328 goto out;
329 }
330
331 if (allocated + count <= guaranteed) {
332 err = 0;
333 from_rsvd = count;
334 } else {
335 /* portion may need to be obtained from free area */
336 if (guaranteed - allocated > 0)
337 from_free = count - (guaranteed - allocated);
338 else
339 from_free = count;
340
341 from_rsvd = count - from_free;
342
343 if (free - from_free >= reserved)
344 err = 0;
345 else
346 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
347 slave, port, resource_str(res_type), free,
348 from_free, reserved);
349 }
350
351 if (!err) {
352 /* grant the request */
353 if (port > 0) {
354 res_alloc->allocated[(port - 1) *
355 (dev->persist->num_vfs + 1) + slave] += count;
356 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358 } else {
359 res_alloc->allocated[slave] += count;
360 res_alloc->res_free -= count;
361 res_alloc->res_reserved -= from_rsvd;
362 }
363 }
364
365 out:
366 spin_unlock(&res_alloc->alloc_lock);
367 return err;
368 }
369
370 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
371 enum mlx4_resource res_type, int count,
372 int port)
373 {
374 struct mlx4_priv *priv = mlx4_priv(dev);
375 struct resource_allocator *res_alloc =
376 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd;
378
379 if (slave > dev->persist->num_vfs)
380 return;
381
382 spin_lock(&res_alloc->alloc_lock);
383
384 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) *
386 (dev->persist->num_vfs + 1) + slave] :
387 res_alloc->allocated[slave];
388 guaranteed = res_alloc->guaranteed[slave];
389
390 if (allocated - count >= guaranteed) {
391 from_rsvd = 0;
392 } else {
393 /* portion may need to be returned to reserved area */
394 if (allocated - guaranteed > 0)
395 from_rsvd = count - (allocated - guaranteed);
396 else
397 from_rsvd = count;
398 }
399
400 if (port > 0) {
401 res_alloc->allocated[(port - 1) *
402 (dev->persist->num_vfs + 1) + slave] -= count;
403 res_alloc->res_port_free[port - 1] += count;
404 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
405 } else {
406 res_alloc->allocated[slave] -= count;
407 res_alloc->res_free += count;
408 res_alloc->res_reserved += from_rsvd;
409 }
410
411 spin_unlock(&res_alloc->alloc_lock);
412 return;
413 }
414
415 static inline void initialize_res_quotas(struct mlx4_dev *dev,
416 struct resource_allocator *res_alloc,
417 enum mlx4_resource res_type,
418 int vf, int num_instances)
419 {
420 res_alloc->guaranteed[vf] = num_instances /
421 (2 * (dev->persist->num_vfs + 1));
422 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
423 if (vf == mlx4_master_func_num(dev)) {
424 res_alloc->res_free = num_instances;
425 if (res_type == RES_MTT) {
426 /* reserved mtts will be taken out of the PF allocation */
427 res_alloc->res_free += dev->caps.reserved_mtts;
428 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
429 res_alloc->quota[vf] += dev->caps.reserved_mtts;
430 }
431 }
432 }
433
434 void mlx4_init_quotas(struct mlx4_dev *dev)
435 {
436 struct mlx4_priv *priv = mlx4_priv(dev);
437 int pf;
438
439 /* quotas for VFs are initialized in mlx4_slave_cap */
440 if (mlx4_is_slave(dev))
441 return;
442
443 if (!mlx4_is_mfunc(dev)) {
444 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
445 mlx4_num_reserved_sqps(dev);
446 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
447 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
448 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
449 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
450 return;
451 }
452
453 pf = mlx4_master_func_num(dev);
454 dev->quotas.qp =
455 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
456 dev->quotas.cq =
457 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
458 dev->quotas.srq =
459 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
460 dev->quotas.mtt =
461 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
462 dev->quotas.mpt =
463 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
464 }
465
466 static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
467 {
468 /* reduce the sink counter */
469 return (dev->caps.max_counters - 1 -
470 (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
471 / MLX4_MAX_PORTS;
472 }
473
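/*
 * Set up the master's resource tracker: the per-slave resource lists, the
 * per-type rb-trees, and the quota/guarantee tables for every function
 * (PF and VFs), sized from the device capabilities.
 */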
474 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
475 {
476 struct mlx4_priv *priv = mlx4_priv(dev);
477 int i, j;
478 int t;
479 int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
480
481 priv->mfunc.master.res_tracker.slave_list =
482 kzalloc(dev->num_slaves * sizeof(struct slave_list),
483 GFP_KERNEL);
484 if (!priv->mfunc.master.res_tracker.slave_list)
485 return -ENOMEM;
486
487 for (i = 0 ; i < dev->num_slaves; i++) {
488 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
489 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
490 slave_list[i].res_list[t]);
491 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
492 }
493
494 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
495 dev->num_slaves);
496 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
497 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
498
499 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
500 struct resource_allocator *res_alloc =
501 &priv->mfunc.master.res_tracker.res_alloc[i];
502 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
503 sizeof(int), GFP_KERNEL);
504 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
505 sizeof(int), GFP_KERNEL);
506 if (i == RES_MAC || i == RES_VLAN)
507 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
508 (dev->persist->num_vfs
509 + 1) *
510 sizeof(int), GFP_KERNEL);
511 else
512 res_alloc->allocated = kzalloc((dev->persist->
513 num_vfs + 1) *
514 sizeof(int), GFP_KERNEL);
515 /* Reduce the sink counter */
516 if (i == RES_COUNTER)
517 res_alloc->res_free = dev->caps.max_counters - 1;
518
519 if (!res_alloc->quota || !res_alloc->guaranteed ||
520 !res_alloc->allocated)
521 goto no_mem_err;
522
523 spin_lock_init(&res_alloc->alloc_lock);
524 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
525 struct mlx4_active_ports actv_ports =
526 mlx4_get_active_ports(dev, t);
527 switch (i) {
528 case RES_QP:
529 initialize_res_quotas(dev, res_alloc, RES_QP,
530 t, dev->caps.num_qps -
531 dev->caps.reserved_qps -
532 mlx4_num_reserved_sqps(dev));
533 break;
534 case RES_CQ:
535 initialize_res_quotas(dev, res_alloc, RES_CQ,
536 t, dev->caps.num_cqs -
537 dev->caps.reserved_cqs);
538 break;
539 case RES_SRQ:
540 initialize_res_quotas(dev, res_alloc, RES_SRQ,
541 t, dev->caps.num_srqs -
542 dev->caps.reserved_srqs);
543 break;
544 case RES_MPT:
545 initialize_res_quotas(dev, res_alloc, RES_MPT,
546 t, dev->caps.num_mpts -
547 dev->caps.reserved_mrws);
548 break;
549 case RES_MTT:
550 initialize_res_quotas(dev, res_alloc, RES_MTT,
551 t, dev->caps.num_mtts -
552 dev->caps.reserved_mtts);
553 break;
554 case RES_MAC:
555 if (t == mlx4_master_func_num(dev)) {
556 int max_vfs_pport = 0;
557 /* Calculate the max vfs per port for */
558 /* both ports. */
559 for (j = 0; j < dev->caps.num_ports;
560 j++) {
561 struct mlx4_slaves_pport slaves_pport =
562 mlx4_phys_to_slaves_pport(dev, j + 1);
563 unsigned current_slaves =
564 bitmap_weight(slaves_pport.slaves,
565 dev->caps.num_ports) - 1;
566 if (max_vfs_pport < current_slaves)
567 max_vfs_pport =
568 current_slaves;
569 }
570 res_alloc->quota[t] =
571 MLX4_MAX_MAC_NUM -
572 2 * max_vfs_pport;
573 res_alloc->guaranteed[t] = 2;
574 for (j = 0; j < MLX4_MAX_PORTS; j++)
575 res_alloc->res_port_free[j] =
576 MLX4_MAX_MAC_NUM;
577 } else {
578 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
579 res_alloc->guaranteed[t] = 2;
580 }
581 break;
582 case RES_VLAN:
583 if (t == mlx4_master_func_num(dev)) {
584 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
585 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
586 for (j = 0; j < MLX4_MAX_PORTS; j++)
587 res_alloc->res_port_free[j] =
588 res_alloc->quota[t];
589 } else {
590 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
591 res_alloc->guaranteed[t] = 0;
592 }
593 break;
594 case RES_COUNTER:
595 res_alloc->quota[t] = dev->caps.max_counters;
596 if (t == mlx4_master_func_num(dev))
597 res_alloc->guaranteed[t] =
598 MLX4_PF_COUNTERS_PER_PORT *
599 MLX4_MAX_PORTS;
600 else if (t <= max_vfs_guarantee_counter)
601 res_alloc->guaranteed[t] =
602 MLX4_VF_COUNTERS_PER_PORT *
603 MLX4_MAX_PORTS;
604 else
605 res_alloc->guaranteed[t] = 0;
606 res_alloc->res_free -= res_alloc->guaranteed[t];
607 break;
608 default:
609 break;
610 }
611 if (i == RES_MAC || i == RES_VLAN) {
612 for (j = 0; j < dev->caps.num_ports; j++)
613 if (test_bit(j, actv_ports.ports))
614 res_alloc->res_port_rsvd[j] +=
615 res_alloc->guaranteed[t];
616 } else {
617 res_alloc->res_reserved += res_alloc->guaranteed[t];
618 }
619 }
620 }
621 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
622 return 0;
623
624 no_mem_err:
625 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
626 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
627 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
628 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
629 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
630 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
631 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
632 }
633 return -ENOMEM;
634 }
635
636 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
637 enum mlx4_res_tracker_free_type type)
638 {
639 struct mlx4_priv *priv = mlx4_priv(dev);
640 int i;
641
642 if (priv->mfunc.master.res_tracker.slave_list) {
643 if (type != RES_TR_FREE_STRUCTS_ONLY) {
644 for (i = 0; i < dev->num_slaves; i++) {
645 if (type == RES_TR_FREE_ALL ||
646 dev->caps.function != i)
647 mlx4_delete_all_resources_for_slave(dev, i);
648 }
649 /* free master's vlans */
650 i = dev->caps.function;
651 mlx4_reset_roce_gids(dev, i);
652 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
653 rem_slave_vlans(dev, i);
654 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
655 }
656
657 if (type != RES_TR_FREE_SLAVES_ONLY) {
658 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
659 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
660 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
661 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
662 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
663 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
664 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
665 }
666 kfree(priv->mfunc.master.res_tracker.slave_list);
667 priv->mfunc.master.res_tracker.slave_list = NULL;
668 }
669 }
670 }
671
672 static void update_pkey_index(struct mlx4_dev *dev, int slave,
673 struct mlx4_cmd_mailbox *inbox)
674 {
675 u8 sched = *(u8 *)(inbox->buf + 64);
676 u8 orig_index = *(u8 *)(inbox->buf + 35);
677 u8 new_index;
678 struct mlx4_priv *priv = mlx4_priv(dev);
679 int port;
680
681 port = (sched >> 6 & 1) + 1;
682
683 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
684 *(u8 *)(inbox->buf + 35) = new_index;
685 }
686
687 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
688 u8 slave)
689 {
690 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
691 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
692 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
693 int port;
694
695 if (MLX4_QP_ST_UD == ts) {
696 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
697 if (mlx4_is_eth(dev, port))
698 qp_ctx->pri_path.mgid_index =
699 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
700 else
701 qp_ctx->pri_path.mgid_index = slave | 0x80;
702
703 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
704 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
705 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
706 if (mlx4_is_eth(dev, port)) {
707 qp_ctx->pri_path.mgid_index +=
708 mlx4_get_base_gid_ix(dev, slave, port);
709 qp_ctx->pri_path.mgid_index &= 0x7f;
710 } else {
711 qp_ctx->pri_path.mgid_index = slave & 0x7F;
712 }
713 }
714 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
715 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
716 if (mlx4_is_eth(dev, port)) {
717 qp_ctx->alt_path.mgid_index +=
718 mlx4_get_base_gid_ix(dev, slave, port);
719 qp_ctx->alt_path.mgid_index &= 0x7f;
720 } else {
721 qp_ctx->alt_path.mgid_index = slave & 0x7F;
722 }
723 }
724 }
725 }
726
727 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
728 u8 slave, int port);
729
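/*
 * Adjust a slave's QP context before it reaches the hardware: bind the QP
 * to a per-port counter, enforce the vport's VST VLAN and QoS settings
 * when an administrative default VLAN is configured, and pin the source
 * MAC when spoof checking is enabled.
 */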
730 static int update_vport_qp_param(struct mlx4_dev *dev,
731 struct mlx4_cmd_mailbox *inbox,
732 u8 slave, u32 qpn)
733 {
734 struct mlx4_qp_context *qpc = inbox->buf + 8;
735 struct mlx4_vport_oper_state *vp_oper;
736 struct mlx4_priv *priv;
737 u32 qp_type;
738 int port, err = 0;
739
740 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
741 priv = mlx4_priv(dev);
742 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
743 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
744
745 err = handle_counter(dev, qpc, slave, port);
746 if (err)
747 goto out;
748
749 if (MLX4_VGT != vp_oper->state.default_vlan) {
750 /* the reserved QPs (special, proxy, tunnel)
751 * do not operate over vlans
752 */
753 if (mlx4_is_qp_reserved(dev, qpn))
754 return 0;
755
756 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
757 if (qp_type == MLX4_QP_ST_UD ||
758 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
759 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
760 *(__be32 *)inbox->buf =
761 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
762 MLX4_QP_OPTPAR_VLAN_STRIPPING);
763 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
764 } else {
765 struct mlx4_update_qp_params params = {.flags = 0};
766
767 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
768 if (err)
769 goto out;
770 }
771 }
772
773 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
774 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
775 qpc->pri_path.vlan_control =
776 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
777 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
778 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
779 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
780 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
781 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
782 } else if (0 != vp_oper->state.default_vlan) {
783 qpc->pri_path.vlan_control =
784 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
785 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
786 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
787 } else { /* priority tagged */
788 qpc->pri_path.vlan_control =
789 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
790 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
791 }
792
793 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
794 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
795 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
796 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
797 qpc->pri_path.sched_queue &= 0xC7;
798 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
799 qpc->qos_vport = vp_oper->state.qos_vport;
800 }
801 if (vp_oper->state.spoofchk) {
802 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
803 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
804 }
805 out:
806 return err;
807 }
808
809 static int mpt_mask(struct mlx4_dev *dev)
810 {
811 return dev->caps.num_mpts - 1;
812 }
813
814 static void *find_res(struct mlx4_dev *dev, u64 res_id,
815 enum mlx4_resource type)
816 {
817 struct mlx4_priv *priv = mlx4_priv(dev);
818
819 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
820 res_id);
821 }
822
823 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
824 enum mlx4_resource type,
825 void *res)
826 {
827 struct res_common *r;
828 int err = 0;
829
830 spin_lock_irq(mlx4_tlock(dev));
831 r = find_res(dev, res_id, type);
832 if (!r) {
833 err = -ENONET;
834 goto exit;
835 }
836
837 if (r->state == RES_ANY_BUSY) {
838 err = -EBUSY;
839 goto exit;
840 }
841
842 if (r->owner != slave) {
843 err = -EPERM;
844 goto exit;
845 }
846
847 r->from_state = r->state;
848 r->state = RES_ANY_BUSY;
849
850 if (res)
851 *((struct res_common **)res) = r;
852
853 exit:
854 spin_unlock_irq(mlx4_tlock(dev));
855 return err;
856 }
857
858 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
859 enum mlx4_resource type,
860 u64 res_id, int *slave)
861 {
862
863 struct res_common *r;
864 int err = -ENOENT;
865 int id = res_id;
866
867 if (type == RES_QP)
868 id &= 0x7fffff;
869 spin_lock(mlx4_tlock(dev));
870
871 r = find_res(dev, id, type);
872 if (r) {
873 *slave = r->owner;
874 err = 0;
875 }
876 spin_unlock(mlx4_tlock(dev));
877
878 return err;
879 }
880
881 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
882 enum mlx4_resource type)
883 {
884 struct res_common *r;
885
886 spin_lock_irq(mlx4_tlock(dev));
887 r = find_res(dev, res_id, type);
888 if (r)
889 r->state = r->from_state;
890 spin_unlock_irq(mlx4_tlock(dev));
891 }
892
893 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
894 u64 in_param, u64 *out_param, int port);
895
896 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
897 int counter_index)
898 {
899 struct res_common *r;
900 struct res_counter *counter;
901 int ret = 0;
902
903 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
904 return ret;
905
906 spin_lock_irq(mlx4_tlock(dev));
907 r = find_res(dev, counter_index, RES_COUNTER);
908 if (!r || r->owner != slave)
909 ret = -EINVAL;
910 counter = container_of(r, struct res_counter, com);
911 if (!counter->port)
912 counter->port = port;
913
914 spin_unlock_irq(mlx4_tlock(dev));
915 return ret;
916 }
917
918 static int handle_unexisting_counter(struct mlx4_dev *dev,
919 struct mlx4_qp_context *qpc, u8 slave,
920 int port)
921 {
922 struct mlx4_priv *priv = mlx4_priv(dev);
923 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
924 struct res_common *tmp;
925 struct res_counter *counter;
926 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
927 int err = 0;
928
929 spin_lock_irq(mlx4_tlock(dev));
930 list_for_each_entry(tmp,
931 &tracker->slave_list[slave].res_list[RES_COUNTER],
932 list) {
933 counter = container_of(tmp, struct res_counter, com);
934 if (port == counter->port) {
935 qpc->pri_path.counter_index = counter->com.res_id;
936 spin_unlock_irq(mlx4_tlock(dev));
937 return 0;
938 }
939 }
940 spin_unlock_irq(mlx4_tlock(dev));
941
942 /* No existing counter, need to allocate a new counter */
943 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
944 port);
945 if (err == -ENOENT) {
946 err = 0;
947 } else if (err && err != -ENOSPC) {
948 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
949 __func__, slave, err);
950 } else {
951 qpc->pri_path.counter_index = counter_idx;
952 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
953 __func__, slave, qpc->pri_path.counter_index);
954 err = 0;
955 }
956
957 return err;
958 }
959
960 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
961 u8 slave, int port)
962 {
963 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
964 return handle_existing_counter(dev, slave, port,
965 qpc->pri_path.counter_index);
966
967 return handle_unexisting_counter(dev, qpc, slave, port);
968 }
969
970 static struct res_common *alloc_qp_tr(int id)
971 {
972 struct res_qp *ret;
973
974 ret = kzalloc(sizeof *ret, GFP_KERNEL);
975 if (!ret)
976 return NULL;
977
978 ret->com.res_id = id;
979 ret->com.state = RES_QP_RESERVED;
980 ret->local_qpn = id;
981 INIT_LIST_HEAD(&ret->mcg_list);
982 spin_lock_init(&ret->mcg_spl);
983 atomic_set(&ret->ref_count, 0);
984
985 return &ret->com;
986 }
987
988 static struct res_common *alloc_mtt_tr(int id, int order)
989 {
990 struct res_mtt *ret;
991
992 ret = kzalloc(sizeof *ret, GFP_KERNEL);
993 if (!ret)
994 return NULL;
995
996 ret->com.res_id = id;
997 ret->order = order;
998 ret->com.state = RES_MTT_ALLOCATED;
999 atomic_set(&ret->ref_count, 0);
1000
1001 return &ret->com;
1002 }
1003
1004 static struct res_common *alloc_mpt_tr(int id, int key)
1005 {
1006 struct res_mpt *ret;
1007
1008 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1009 if (!ret)
1010 return NULL;
1011
1012 ret->com.res_id = id;
1013 ret->com.state = RES_MPT_RESERVED;
1014 ret->key = key;
1015
1016 return &ret->com;
1017 }
1018
1019 static struct res_common *alloc_eq_tr(int id)
1020 {
1021 struct res_eq *ret;
1022
1023 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1024 if (!ret)
1025 return NULL;
1026
1027 ret->com.res_id = id;
1028 ret->com.state = RES_EQ_RESERVED;
1029
1030 return &ret->com;
1031 }
1032
1033 static struct res_common *alloc_cq_tr(int id)
1034 {
1035 struct res_cq *ret;
1036
1037 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1038 if (!ret)
1039 return NULL;
1040
1041 ret->com.res_id = id;
1042 ret->com.state = RES_CQ_ALLOCATED;
1043 atomic_set(&ret->ref_count, 0);
1044
1045 return &ret->com;
1046 }
1047
1048 static struct res_common *alloc_srq_tr(int id)
1049 {
1050 struct res_srq *ret;
1051
1052 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1053 if (!ret)
1054 return NULL;
1055
1056 ret->com.res_id = id;
1057 ret->com.state = RES_SRQ_ALLOCATED;
1058 atomic_set(&ret->ref_count, 0);
1059
1060 return &ret->com;
1061 }
1062
1063 static struct res_common *alloc_counter_tr(int id, int port)
1064 {
1065 struct res_counter *ret;
1066
1067 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1068 if (!ret)
1069 return NULL;
1070
1071 ret->com.res_id = id;
1072 ret->com.state = RES_COUNTER_ALLOCATED;
1073 ret->port = port;
1074
1075 return &ret->com;
1076 }
1077
1078 static struct res_common *alloc_xrcdn_tr(int id)
1079 {
1080 struct res_xrcdn *ret;
1081
1082 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1083 if (!ret)
1084 return NULL;
1085
1086 ret->com.res_id = id;
1087 ret->com.state = RES_XRCD_ALLOCATED;
1088
1089 return &ret->com;
1090 }
1091
1092 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1093 {
1094 struct res_fs_rule *ret;
1095
1096 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1097 if (!ret)
1098 return NULL;
1099
1100 ret->com.res_id = id;
1101 ret->com.state = RES_FS_RULE_ALLOCATED;
1102 ret->qpn = qpn;
1103 return &ret->com;
1104 }
1105
1106 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1107 int extra)
1108 {
1109 struct res_common *ret;
1110
1111 switch (type) {
1112 case RES_QP:
1113 ret = alloc_qp_tr(id);
1114 break;
1115 case RES_MPT:
1116 ret = alloc_mpt_tr(id, extra);
1117 break;
1118 case RES_MTT:
1119 ret = alloc_mtt_tr(id, extra);
1120 break;
1121 case RES_EQ:
1122 ret = alloc_eq_tr(id);
1123 break;
1124 case RES_CQ:
1125 ret = alloc_cq_tr(id);
1126 break;
1127 case RES_SRQ:
1128 ret = alloc_srq_tr(id);
1129 break;
1130 case RES_MAC:
1131 pr_err("implementation missing\n");
1132 return NULL;
1133 case RES_COUNTER:
1134 ret = alloc_counter_tr(id, extra);
1135 break;
1136 case RES_XRCD:
1137 ret = alloc_xrcdn_tr(id);
1138 break;
1139 case RES_FS_RULE:
1140 ret = alloc_fs_rule_tr(id, extra);
1141 break;
1142 default:
1143 return NULL;
1144 }
1145 if (ret)
1146 ret->owner = slave;
1147
1148 return ret;
1149 }
1150
1151 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1152 struct mlx4_counter *data)
1153 {
1154 struct mlx4_priv *priv = mlx4_priv(dev);
1155 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1156 struct res_common *tmp;
1157 struct res_counter *counter;
1158 int *counters_arr;
1159 int i = 0, err = 0;
1160
1161 memset(data, 0, sizeof(*data));
1162
1163 counters_arr = kmalloc_array(dev->caps.max_counters,
1164 sizeof(*counters_arr), GFP_KERNEL);
1165 if (!counters_arr)
1166 return -ENOMEM;
1167
1168 spin_lock_irq(mlx4_tlock(dev));
1169 list_for_each_entry(tmp,
1170 &tracker->slave_list[slave].res_list[RES_COUNTER],
1171 list) {
1172 counter = container_of(tmp, struct res_counter, com);
1173 if (counter->port == port) {
1174 counters_arr[i] = (int)tmp->res_id;
1175 i++;
1176 }
1177 }
1178 spin_unlock_irq(mlx4_tlock(dev));
1179 counters_arr[i] = -1;
1180
1181 i = 0;
1182
1183 while (counters_arr[i] != -1) {
1184 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1185 0);
1186 if (err) {
1187 memset(data, 0, sizeof(*data));
1188 goto table_changed;
1189 }
1190 i++;
1191 }
1192
1193 table_changed:
1194 kfree(counters_arr);
1195 return 0;
1196 }
1197
1198 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1199 enum mlx4_resource type, int extra)
1200 {
1201 int i;
1202 int err;
1203 struct mlx4_priv *priv = mlx4_priv(dev);
1204 struct res_common **res_arr;
1205 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1206 struct rb_root *root = &tracker->res_tree[type];
1207
1208 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1209 if (!res_arr)
1210 return -ENOMEM;
1211
1212 for (i = 0; i < count; ++i) {
1213 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1214 if (!res_arr[i]) {
1215 for (--i; i >= 0; --i)
1216 kfree(res_arr[i]);
1217
1218 kfree(res_arr);
1219 return -ENOMEM;
1220 }
1221 }
1222
1223 spin_lock_irq(mlx4_tlock(dev));
1224 for (i = 0; i < count; ++i) {
1225 if (find_res(dev, base + i, type)) {
1226 err = -EEXIST;
1227 goto undo;
1228 }
1229 err = res_tracker_insert(root, res_arr[i]);
1230 if (err)
1231 goto undo;
1232 list_add_tail(&res_arr[i]->list,
1233 &tracker->slave_list[slave].res_list[type]);
1234 }
1235 spin_unlock_irq(mlx4_tlock(dev));
1236 kfree(res_arr);
1237
1238 return 0;
1239
1240 undo:
1241 for (--i; i >= 0; --i) {
1242 rb_erase(&res_arr[i]->node, root);
1243 list_del_init(&res_arr[i]->list);
1244 }
1245
1246 spin_unlock_irq(mlx4_tlock(dev));
1247
1248 for (i = 0; i < count; ++i)
1249 kfree(res_arr[i]);
1250
1251 kfree(res_arr);
1252
1253 return err;
1254 }
1255
1256 static int remove_qp_ok(struct res_qp *res)
1257 {
1258 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1259 !list_empty(&res->mcg_list)) {
1260 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1261 res->com.state, atomic_read(&res->ref_count));
1262 return -EBUSY;
1263 } else if (res->com.state != RES_QP_RESERVED) {
1264 return -EPERM;
1265 }
1266
1267 return 0;
1268 }
1269
1270 static int remove_mtt_ok(struct res_mtt *res, int order)
1271 {
1272 if (res->com.state == RES_MTT_BUSY ||
1273 atomic_read(&res->ref_count)) {
1274 pr_devel("%s-%d: state %s, ref_count %d\n",
1275 __func__, __LINE__,
1276 mtt_states_str(res->com.state),
1277 atomic_read(&res->ref_count));
1278 return -EBUSY;
1279 } else if (res->com.state != RES_MTT_ALLOCATED)
1280 return -EPERM;
1281 else if (res->order != order)
1282 return -EINVAL;
1283
1284 return 0;
1285 }
1286
1287 static int remove_mpt_ok(struct res_mpt *res)
1288 {
1289 if (res->com.state == RES_MPT_BUSY)
1290 return -EBUSY;
1291 else if (res->com.state != RES_MPT_RESERVED)
1292 return -EPERM;
1293
1294 return 0;
1295 }
1296
1297 static int remove_eq_ok(struct res_eq *res)
1298 {
1299 if (res->com.state == RES_EQ_BUSY)
1300 return -EBUSY;
1301 else if (res->com.state != RES_EQ_RESERVED)
1302 return -EPERM;
1303
1304 return 0;
1305 }
1306
1307 static int remove_counter_ok(struct res_counter *res)
1308 {
1309 if (res->com.state == RES_COUNTER_BUSY)
1310 return -EBUSY;
1311 else if (res->com.state != RES_COUNTER_ALLOCATED)
1312 return -EPERM;
1313
1314 return 0;
1315 }
1316
1317 static int remove_xrcdn_ok(struct res_xrcdn *res)
1318 {
1319 if (res->com.state == RES_XRCD_BUSY)
1320 return -EBUSY;
1321 else if (res->com.state != RES_XRCD_ALLOCATED)
1322 return -EPERM;
1323
1324 return 0;
1325 }
1326
1327 static int remove_fs_rule_ok(struct res_fs_rule *res)
1328 {
1329 if (res->com.state == RES_FS_RULE_BUSY)
1330 return -EBUSY;
1331 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1332 return -EPERM;
1333
1334 return 0;
1335 }
1336
1337 static int remove_cq_ok(struct res_cq *res)
1338 {
1339 if (res->com.state == RES_CQ_BUSY)
1340 return -EBUSY;
1341 else if (res->com.state != RES_CQ_ALLOCATED)
1342 return -EPERM;
1343
1344 return 0;
1345 }
1346
1347 static int remove_srq_ok(struct res_srq *res)
1348 {
1349 if (res->com.state == RES_SRQ_BUSY)
1350 return -EBUSY;
1351 else if (res->com.state != RES_SRQ_ALLOCATED)
1352 return -EPERM;
1353
1354 return 0;
1355 }
1356
1357 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1358 {
1359 switch (type) {
1360 case RES_QP:
1361 return remove_qp_ok((struct res_qp *)res);
1362 case RES_CQ:
1363 return remove_cq_ok((struct res_cq *)res);
1364 case RES_SRQ:
1365 return remove_srq_ok((struct res_srq *)res);
1366 case RES_MPT:
1367 return remove_mpt_ok((struct res_mpt *)res);
1368 case RES_MTT:
1369 return remove_mtt_ok((struct res_mtt *)res, extra);
1370 case RES_MAC:
1371 return -ENOSYS;
1372 case RES_EQ:
1373 return remove_eq_ok((struct res_eq *)res);
1374 case RES_COUNTER:
1375 return remove_counter_ok((struct res_counter *)res);
1376 case RES_XRCD:
1377 return remove_xrcdn_ok((struct res_xrcdn *)res);
1378 case RES_FS_RULE:
1379 return remove_fs_rule_ok((struct res_fs_rule *)res);
1380 default:
1381 return -EINVAL;
1382 }
1383 }
1384
1385 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1386 enum mlx4_resource type, int extra)
1387 {
1388 u64 i;
1389 int err;
1390 struct mlx4_priv *priv = mlx4_priv(dev);
1391 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1392 struct res_common *r;
1393
1394 spin_lock_irq(mlx4_tlock(dev));
1395 for (i = base; i < base + count; ++i) {
1396 r = res_tracker_lookup(&tracker->res_tree[type], i);
1397 if (!r) {
1398 err = -ENOENT;
1399 goto out;
1400 }
1401 if (r->owner != slave) {
1402 err = -EPERM;
1403 goto out;
1404 }
1405 err = remove_ok(r, type, extra);
1406 if (err)
1407 goto out;
1408 }
1409
1410 for (i = base; i < base + count; ++i) {
1411 r = res_tracker_lookup(&tracker->res_tree[type], i);
1412 rb_erase(&r->node, &tracker->res_tree[type]);
1413 list_del(&r->list);
1414 kfree(r);
1415 }
1416 err = 0;
1417
1418 out:
1419 spin_unlock_irq(mlx4_tlock(dev));
1420
1421 return err;
1422 }
1423
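/*
 * Resource state changes follow a two-phase pattern: *_res_start_move_to()
 * validates the requested transition, records the current and target
 * states and marks the resource busy; the caller then issues the FW
 * command and finishes with res_end_move() on success or res_abort_move()
 * on failure.
 */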
1424 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1425 enum res_qp_states state, struct res_qp **qp,
1426 int alloc)
1427 {
1428 struct mlx4_priv *priv = mlx4_priv(dev);
1429 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1430 struct res_qp *r;
1431 int err = 0;
1432
1433 spin_lock_irq(mlx4_tlock(dev));
1434 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1435 if (!r)
1436 err = -ENOENT;
1437 else if (r->com.owner != slave)
1438 err = -EPERM;
1439 else {
1440 switch (state) {
1441 case RES_QP_BUSY:
1442 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1443 __func__, r->com.res_id);
1444 err = -EBUSY;
1445 break;
1446
1447 case RES_QP_RESERVED:
1448 if (r->com.state == RES_QP_MAPPED && !alloc)
1449 break;
1450
1451 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1452 err = -EINVAL;
1453 break;
1454
1455 case RES_QP_MAPPED:
1456 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1457 r->com.state == RES_QP_HW)
1458 break;
1459 else {
1460 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1461 r->com.res_id);
1462 err = -EINVAL;
1463 }
1464
1465 break;
1466
1467 case RES_QP_HW:
1468 if (r->com.state != RES_QP_MAPPED)
1469 err = -EINVAL;
1470 break;
1471 default:
1472 err = -EINVAL;
1473 }
1474
1475 if (!err) {
1476 r->com.from_state = r->com.state;
1477 r->com.to_state = state;
1478 r->com.state = RES_QP_BUSY;
1479 if (qp)
1480 *qp = r;
1481 }
1482 }
1483
1484 spin_unlock_irq(mlx4_tlock(dev));
1485
1486 return err;
1487 }
1488
1489 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1490 enum res_mpt_states state, struct res_mpt **mpt)
1491 {
1492 struct mlx4_priv *priv = mlx4_priv(dev);
1493 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1494 struct res_mpt *r;
1495 int err = 0;
1496
1497 spin_lock_irq(mlx4_tlock(dev));
1498 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1499 if (!r)
1500 err = -ENOENT;
1501 else if (r->com.owner != slave)
1502 err = -EPERM;
1503 else {
1504 switch (state) {
1505 case RES_MPT_BUSY:
1506 err = -EINVAL;
1507 break;
1508
1509 case RES_MPT_RESERVED:
1510 if (r->com.state != RES_MPT_MAPPED)
1511 err = -EINVAL;
1512 break;
1513
1514 case RES_MPT_MAPPED:
1515 if (r->com.state != RES_MPT_RESERVED &&
1516 r->com.state != RES_MPT_HW)
1517 err = -EINVAL;
1518 break;
1519
1520 case RES_MPT_HW:
1521 if (r->com.state != RES_MPT_MAPPED)
1522 err = -EINVAL;
1523 break;
1524 default:
1525 err = -EINVAL;
1526 }
1527
1528 if (!err) {
1529 r->com.from_state = r->com.state;
1530 r->com.to_state = state;
1531 r->com.state = RES_MPT_BUSY;
1532 if (mpt)
1533 *mpt = r;
1534 }
1535 }
1536
1537 spin_unlock_irq(mlx4_tlock(dev));
1538
1539 return err;
1540 }
1541
1542 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1543 enum res_eq_states state, struct res_eq **eq)
1544 {
1545 struct mlx4_priv *priv = mlx4_priv(dev);
1546 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1547 struct res_eq *r;
1548 int err = 0;
1549
1550 spin_lock_irq(mlx4_tlock(dev));
1551 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1552 if (!r)
1553 err = -ENOENT;
1554 else if (r->com.owner != slave)
1555 err = -EPERM;
1556 else {
1557 switch (state) {
1558 case RES_EQ_BUSY:
1559 err = -EINVAL;
1560 break;
1561
1562 case RES_EQ_RESERVED:
1563 if (r->com.state != RES_EQ_HW)
1564 err = -EINVAL;
1565 break;
1566
1567 case RES_EQ_HW:
1568 if (r->com.state != RES_EQ_RESERVED)
1569 err = -EINVAL;
1570 break;
1571
1572 default:
1573 err = -EINVAL;
1574 }
1575
1576 if (!err) {
1577 r->com.from_state = r->com.state;
1578 r->com.to_state = state;
1579 r->com.state = RES_EQ_BUSY;
1580 if (eq)
1581 *eq = r;
1582 }
1583 }
1584
1585 spin_unlock_irq(mlx4_tlock(dev));
1586
1587 return err;
1588 }
1589
1590 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1591 enum res_cq_states state, struct res_cq **cq)
1592 {
1593 struct mlx4_priv *priv = mlx4_priv(dev);
1594 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1595 struct res_cq *r;
1596 int err;
1597
1598 spin_lock_irq(mlx4_tlock(dev));
1599 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1600 if (!r) {
1601 err = -ENOENT;
1602 } else if (r->com.owner != slave) {
1603 err = -EPERM;
1604 } else if (state == RES_CQ_ALLOCATED) {
1605 if (r->com.state != RES_CQ_HW)
1606 err = -EINVAL;
1607 else if (atomic_read(&r->ref_count))
1608 err = -EBUSY;
1609 else
1610 err = 0;
1611 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1612 err = -EINVAL;
1613 } else {
1614 err = 0;
1615 }
1616
1617 if (!err) {
1618 r->com.from_state = r->com.state;
1619 r->com.to_state = state;
1620 r->com.state = RES_CQ_BUSY;
1621 if (cq)
1622 *cq = r;
1623 }
1624
1625 spin_unlock_irq(mlx4_tlock(dev));
1626
1627 return err;
1628 }
1629
1630 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1631 enum res_srq_states state, struct res_srq **srq)
1632 {
1633 struct mlx4_priv *priv = mlx4_priv(dev);
1634 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1635 struct res_srq *r;
1636 int err = 0;
1637
1638 spin_lock_irq(mlx4_tlock(dev));
1639 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1640 if (!r) {
1641 err = -ENOENT;
1642 } else if (r->com.owner != slave) {
1643 err = -EPERM;
1644 } else if (state == RES_SRQ_ALLOCATED) {
1645 if (r->com.state != RES_SRQ_HW)
1646 err = -EINVAL;
1647 else if (atomic_read(&r->ref_count))
1648 err = -EBUSY;
1649 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1650 err = -EINVAL;
1651 }
1652
1653 if (!err) {
1654 r->com.from_state = r->com.state;
1655 r->com.to_state = state;
1656 r->com.state = RES_SRQ_BUSY;
1657 if (srq)
1658 *srq = r;
1659 }
1660
1661 spin_unlock_irq(mlx4_tlock(dev));
1662
1663 return err;
1664 }
1665
1666 static void res_abort_move(struct mlx4_dev *dev, int slave,
1667 enum mlx4_resource type, int id)
1668 {
1669 struct mlx4_priv *priv = mlx4_priv(dev);
1670 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1671 struct res_common *r;
1672
1673 spin_lock_irq(mlx4_tlock(dev));
1674 r = res_tracker_lookup(&tracker->res_tree[type], id);
1675 if (r && (r->owner == slave))
1676 r->state = r->from_state;
1677 spin_unlock_irq(mlx4_tlock(dev));
1678 }
1679
1680 static void res_end_move(struct mlx4_dev *dev, int slave,
1681 enum mlx4_resource type, int id)
1682 {
1683 struct mlx4_priv *priv = mlx4_priv(dev);
1684 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1685 struct res_common *r;
1686
1687 spin_lock_irq(mlx4_tlock(dev));
1688 r = res_tracker_lookup(&tracker->res_tree[type], id);
1689 if (r && (r->owner == slave))
1690 r->state = r->to_state;
1691 spin_unlock_irq(mlx4_tlock(dev));
1692 }
1693
1694 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1695 {
1696 return mlx4_is_qp_reserved(dev, qpn) &&
1697 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1698 }
1699
1700 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1701 {
1702 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1703 }
1704
1705 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1706 u64 in_param, u64 *out_param)
1707 {
1708 int err;
1709 int count;
1710 int align;
1711 int base;
1712 int qpn;
1713 u8 flags;
1714
1715 switch (op) {
1716 case RES_OP_RESERVE:
1717 count = get_param_l(&in_param) & 0xffffff;
1718 /* Turn off all unsupported QP allocation flags that the
1719 * slave tries to set.
1720 */
1721 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1722 align = get_param_h(&in_param);
1723 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1724 if (err)
1725 return err;
1726
1727 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1728 if (err) {
1729 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1730 return err;
1731 }
1732
1733 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1734 if (err) {
1735 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1736 __mlx4_qp_release_range(dev, base, count);
1737 return err;
1738 }
1739 set_param_l(out_param, base);
1740 break;
1741 case RES_OP_MAP_ICM:
1742 qpn = get_param_l(&in_param) & 0x7fffff;
1743 if (valid_reserved(dev, slave, qpn)) {
1744 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1745 if (err)
1746 return err;
1747 }
1748
1749 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1750 NULL, 1);
1751 if (err)
1752 return err;
1753
1754 if (!fw_reserved(dev, qpn)) {
1755 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1756 if (err) {
1757 res_abort_move(dev, slave, RES_QP, qpn);
1758 return err;
1759 }
1760 }
1761
1762 res_end_move(dev, slave, RES_QP, qpn);
1763 break;
1764
1765 default:
1766 err = -EINVAL;
1767 break;
1768 }
1769 return err;
1770 }
1771
1772 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1773 u64 in_param, u64 *out_param)
1774 {
1775 int err = -EINVAL;
1776 int base;
1777 int order;
1778
1779 if (op != RES_OP_RESERVE_AND_MAP)
1780 return err;
1781
1782 order = get_param_l(&in_param);
1783
1784 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1785 if (err)
1786 return err;
1787
1788 base = __mlx4_alloc_mtt_range(dev, order);
1789 if (base == -1) {
1790 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1791 return -ENOMEM;
1792 }
1793
1794 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1795 if (err) {
1796 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1797 __mlx4_free_mtt_range(dev, base, order);
1798 } else {
1799 set_param_l(out_param, base);
1800 }
1801
1802 return err;
1803 }
1804
1805 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1806 u64 in_param, u64 *out_param)
1807 {
1808 int err = -EINVAL;
1809 int index;
1810 int id;
1811 struct res_mpt *mpt;
1812
1813 switch (op) {
1814 case RES_OP_RESERVE:
1815 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1816 if (err)
1817 break;
1818
1819 index = __mlx4_mpt_reserve(dev);
1820 if (index == -1) {
1821 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1822 break;
1823 }
1824 id = index & mpt_mask(dev);
1825
1826 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1827 if (err) {
1828 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1829 __mlx4_mpt_release(dev, index);
1830 break;
1831 }
1832 set_param_l(out_param, index);
1833 break;
1834 case RES_OP_MAP_ICM:
1835 index = get_param_l(&in_param);
1836 id = index & mpt_mask(dev);
1837 err = mr_res_start_move_to(dev, slave, id,
1838 RES_MPT_MAPPED, &mpt);
1839 if (err)
1840 return err;
1841
1842 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1843 if (err) {
1844 res_abort_move(dev, slave, RES_MPT, id);
1845 return err;
1846 }
1847
1848 res_end_move(dev, slave, RES_MPT, id);
1849 break;
1850 }
1851 return err;
1852 }
1853
1854 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1855 u64 in_param, u64 *out_param)
1856 {
1857 int cqn;
1858 int err;
1859
1860 switch (op) {
1861 case RES_OP_RESERVE_AND_MAP:
1862 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1863 if (err)
1864 break;
1865
1866 err = __mlx4_cq_alloc_icm(dev, &cqn);
1867 if (err) {
1868 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1869 break;
1870 }
1871
1872 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1873 if (err) {
1874 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1875 __mlx4_cq_free_icm(dev, cqn);
1876 break;
1877 }
1878
1879 set_param_l(out_param, cqn);
1880 break;
1881
1882 default:
1883 err = -EINVAL;
1884 }
1885
1886 return err;
1887 }
1888
1889 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1890 u64 in_param, u64 *out_param)
1891 {
1892 int srqn;
1893 int err;
1894
1895 switch (op) {
1896 case RES_OP_RESERVE_AND_MAP:
1897 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1898 if (err)
1899 break;
1900
1901 err = __mlx4_srq_alloc_icm(dev, &srqn);
1902 if (err) {
1903 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1904 break;
1905 }
1906
1907 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1908 if (err) {
1909 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1910 __mlx4_srq_free_icm(dev, srqn);
1911 break;
1912 }
1913
1914 set_param_l(out_param, srqn);
1915 break;
1916
1917 default:
1918 err = -EINVAL;
1919 }
1920
1921 return err;
1922 }
1923
1924 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1925 u8 smac_index, u64 *mac)
1926 {
1927 struct mlx4_priv *priv = mlx4_priv(dev);
1928 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1929 struct list_head *mac_list =
1930 &tracker->slave_list[slave].res_list[RES_MAC];
1931 struct mac_res *res, *tmp;
1932
1933 list_for_each_entry_safe(res, tmp, mac_list, list) {
1934 if (res->smac_index == smac_index && res->port == (u8) port) {
1935 *mac = res->mac;
1936 return 0;
1937 }
1938 }
1939 return -ENOENT;
1940 }
1941
1942 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1943 {
1944 struct mlx4_priv *priv = mlx4_priv(dev);
1945 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1946 struct list_head *mac_list =
1947 &tracker->slave_list[slave].res_list[RES_MAC];
1948 struct mac_res *res, *tmp;
1949
1950 list_for_each_entry_safe(res, tmp, mac_list, list) {
1951 if (res->mac == mac && res->port == (u8) port) {
1952 /* mac found. update ref count */
1953 ++res->ref_count;
1954 return 0;
1955 }
1956 }
1957
1958 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1959 return -EINVAL;
1960 res = kzalloc(sizeof *res, GFP_KERNEL);
1961 if (!res) {
1962 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1963 return -ENOMEM;
1964 }
1965 res->mac = mac;
1966 res->port = (u8) port;
1967 res->smac_index = smac_index;
1968 res->ref_count = 1;
1969 list_add_tail(&res->list,
1970 &tracker->slave_list[slave].res_list[RES_MAC]);
1971 return 0;
1972 }
1973
1974 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1975 int port)
1976 {
1977 struct mlx4_priv *priv = mlx4_priv(dev);
1978 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1979 struct list_head *mac_list =
1980 &tracker->slave_list[slave].res_list[RES_MAC];
1981 struct mac_res *res, *tmp;
1982
1983 list_for_each_entry_safe(res, tmp, mac_list, list) {
1984 if (res->mac == mac && res->port == (u8) port) {
1985 if (!--res->ref_count) {
1986 list_del(&res->list);
1987 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1988 kfree(res);
1989 }
1990 break;
1991 }
1992 }
1993 }
1994
1995 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1996 {
1997 struct mlx4_priv *priv = mlx4_priv(dev);
1998 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1999 struct list_head *mac_list =
2000 &tracker->slave_list[slave].res_list[RES_MAC];
2001 struct mac_res *res, *tmp;
2002 int i;
2003
2004 list_for_each_entry_safe(res, tmp, mac_list, list) {
2005 list_del(&res->list);
2006 /* dereference the mac the num times the slave referenced it */
2007 for (i = 0; i < res->ref_count; i++)
2008 __mlx4_unregister_mac(dev, res->port, res->mac);
2009 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2010 kfree(res);
2011 }
2012 }
2013
2014 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2015 u64 in_param, u64 *out_param, int in_port)
2016 {
2017 int err = -EINVAL;
2018 int port;
2019 u64 mac;
2020 u8 smac_index;
2021
2022 if (op != RES_OP_RESERVE_AND_MAP)
2023 return err;
2024
2025 port = !in_port ? get_param_l(out_param) : in_port;
2026 port = mlx4_slave_convert_port(
2027 dev, slave, port);
2028
2029 if (port < 0)
2030 return -EINVAL;
2031 mac = in_param;
2032
2033 err = __mlx4_register_mac(dev, port, mac);
2034 if (err >= 0) {
2035 smac_index = err;
2036 set_param_l(out_param, err);
2037 err = 0;
2038 }
2039
2040 if (!err) {
2041 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2042 if (err)
2043 __mlx4_unregister_mac(dev, port, mac);
2044 }
2045 return err;
2046 }
2047
2048 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2049 int port, int vlan_index)
2050 {
2051 struct mlx4_priv *priv = mlx4_priv(dev);
2052 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2053 struct list_head *vlan_list =
2054 &tracker->slave_list[slave].res_list[RES_VLAN];
2055 struct vlan_res *res, *tmp;
2056
2057 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2058 if (res->vlan == vlan && res->port == (u8) port) {
2059 /* vlan found. update ref count */
2060 ++res->ref_count;
2061 return 0;
2062 }
2063 }
2064
2065 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2066 return -EINVAL;
2067 res = kzalloc(sizeof(*res), GFP_KERNEL);
2068 if (!res) {
2069 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2070 return -ENOMEM;
2071 }
2072 res->vlan = vlan;
2073 res->port = (u8) port;
2074 res->vlan_index = vlan_index;
2075 res->ref_count = 1;
2076 list_add_tail(&res->list,
2077 &tracker->slave_list[slave].res_list[RES_VLAN]);
2078 return 0;
2079 }
2080
2081
2082 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2083 int port)
2084 {
2085 struct mlx4_priv *priv = mlx4_priv(dev);
2086 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2087 struct list_head *vlan_list =
2088 &tracker->slave_list[slave].res_list[RES_VLAN];
2089 struct vlan_res *res, *tmp;
2090
2091 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2092 if (res->vlan == vlan && res->port == (u8) port) {
2093 if (!--res->ref_count) {
2094 list_del(&res->list);
2095 mlx4_release_resource(dev, slave, RES_VLAN,
2096 1, port);
2097 kfree(res);
2098 }
2099 break;
2100 }
2101 }
2102 }
2103
2104 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2105 {
2106 struct mlx4_priv *priv = mlx4_priv(dev);
2107 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2108 struct list_head *vlan_list =
2109 &tracker->slave_list[slave].res_list[RES_VLAN];
2110 struct vlan_res *res, *tmp;
2111 int i;
2112
2113 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2114 list_del(&res->list);
2115 		/* dereference the VLAN the number of times the slave referenced it */
2116 for (i = 0; i < res->ref_count; i++)
2117 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2118 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2119 kfree(res);
2120 }
2121 }
2122
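/*
 * VLAN allocation on behalf of a slave.  Registered VLANs are tracked per
 * slave so rem_slave_vlans() can undo them; requests that do not encode
 * the port in the in_modifier are taken to use the legacy VLAN API and
 * are treated as a no-op (see old_vlan_api below).
 */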
2123 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2124 u64 in_param, u64 *out_param, int in_port)
2125 {
2126 struct mlx4_priv *priv = mlx4_priv(dev);
2127 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2128 int err;
2129 u16 vlan;
2130 int vlan_index;
2131 int port;
2132
2133 port = !in_port ? get_param_l(out_param) : in_port;
2134
2135 if (!port || op != RES_OP_RESERVE_AND_MAP)
2136 return -EINVAL;
2137
2138 port = mlx4_slave_convert_port(
2139 dev, slave, port);
2140
2141 if (port < 0)
2142 return -EINVAL;
2143 	/* upstream kernels treated vlan reg/unreg as a NOP. Continue this. */
2144 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2145 slave_state[slave].old_vlan_api = true;
2146 return 0;
2147 }
2148
2149 vlan = (u16) in_param;
2150
2151 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2152 if (!err) {
2153 set_param_l(out_param, (u32) vlan_index);
2154 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2155 if (err)
2156 __mlx4_unregister_vlan(dev, port, vlan);
2157 }
2158 return err;
2159 }
2160
2161 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2162 u64 in_param, u64 *out_param, int port)
2163 {
2164 u32 index;
2165 int err;
2166
2167 if (op != RES_OP_RESERVE)
2168 return -EINVAL;
2169
2170 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2171 if (err)
2172 return err;
2173
2174 err = __mlx4_counter_alloc(dev, &index);
2175 if (err) {
2176 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2177 return err;
2178 }
2179
2180 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2181 if (err) {
2182 __mlx4_counter_free(dev, index);
2183 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2184 } else {
2185 set_param_l(out_param, index);
2186 }
2187
2188 return err;
2189 }
2190
2191 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2192 u64 in_param, u64 *out_param)
2193 {
2194 u32 xrcdn;
2195 int err;
2196
2197 if (op != RES_OP_RESERVE)
2198 return -EINVAL;
2199
2200 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2201 if (err)
2202 return err;
2203
2204 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2205 if (err)
2206 __mlx4_xrcd_free(dev, xrcdn);
2207 else
2208 set_param_l(out_param, xrcdn);
2209
2210 return err;
2211 }
2212
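/*
 * Dispatcher for the virtualized ALLOC_RES command: the low byte of the
 * in_modifier selects the resource type, the next byte carries the port
 * where relevant (MAC/VLAN), op_modifier selects the sub-operation and
 * the allocated id is returned through vhcr->out_param.
 */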
2213 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2214 struct mlx4_vhcr *vhcr,
2215 struct mlx4_cmd_mailbox *inbox,
2216 struct mlx4_cmd_mailbox *outbox,
2217 struct mlx4_cmd_info *cmd)
2218 {
2219 int err;
2220 int alop = vhcr->op_modifier;
2221
2222 switch (vhcr->in_modifier & 0xFF) {
2223 case RES_QP:
2224 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2225 vhcr->in_param, &vhcr->out_param);
2226 break;
2227
2228 case RES_MTT:
2229 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2230 vhcr->in_param, &vhcr->out_param);
2231 break;
2232
2233 case RES_MPT:
2234 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2235 vhcr->in_param, &vhcr->out_param);
2236 break;
2237
2238 case RES_CQ:
2239 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2240 vhcr->in_param, &vhcr->out_param);
2241 break;
2242
2243 case RES_SRQ:
2244 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2245 vhcr->in_param, &vhcr->out_param);
2246 break;
2247
2248 case RES_MAC:
2249 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2250 vhcr->in_param, &vhcr->out_param,
2251 (vhcr->in_modifier >> 8) & 0xFF);
2252 break;
2253
2254 case RES_VLAN:
2255 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2256 vhcr->in_param, &vhcr->out_param,
2257 (vhcr->in_modifier >> 8) & 0xFF);
2258 break;
2259
2260 case RES_COUNTER:
2261 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2262 vhcr->in_param, &vhcr->out_param, 0);
2263 break;
2264
2265 case RES_XRCD:
2266 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2267 vhcr->in_param, &vhcr->out_param);
2268 break;
2269
2270 default:
2271 err = -EINVAL;
2272 break;
2273 }
2274
2275 return err;
2276 }
2277
2278 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2279 u64 in_param)
2280 {
2281 int err;
2282 int count;
2283 int base;
2284 int qpn;
2285
2286 switch (op) {
2287 case RES_OP_RESERVE:
2288 base = get_param_l(&in_param) & 0x7fffff;
2289 count = get_param_h(&in_param);
2290 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2291 if (err)
2292 break;
2293 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2294 __mlx4_qp_release_range(dev, base, count);
2295 break;
2296 case RES_OP_MAP_ICM:
2297 qpn = get_param_l(&in_param) & 0x7fffff;
2298 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2299 NULL, 0);
2300 if (err)
2301 return err;
2302
2303 if (!fw_reserved(dev, qpn))
2304 __mlx4_qp_free_icm(dev, qpn);
2305
2306 res_end_move(dev, slave, RES_QP, qpn);
2307
2308 if (valid_reserved(dev, slave, qpn))
2309 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2310 break;
2311 default:
2312 err = -EINVAL;
2313 break;
2314 }
2315 return err;
2316 }
2317
2318 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2319 u64 in_param, u64 *out_param)
2320 {
2321 int err = -EINVAL;
2322 int base;
2323 int order;
2324
2325 if (op != RES_OP_RESERVE_AND_MAP)
2326 return err;
2327
2328 base = get_param_l(&in_param);
2329 order = get_param_h(&in_param);
2330 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2331 if (!err) {
2332 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2333 __mlx4_free_mtt_range(dev, base, order);
2334 }
2335 return err;
2336 }
2337
2338 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2339 u64 in_param)
2340 {
2341 int err = -EINVAL;
2342 int index;
2343 int id;
2344 struct res_mpt *mpt;
2345
2346 switch (op) {
2347 case RES_OP_RESERVE:
2348 index = get_param_l(&in_param);
2349 id = index & mpt_mask(dev);
2350 err = get_res(dev, slave, id, RES_MPT, &mpt);
2351 if (err)
2352 break;
2353 index = mpt->key;
2354 put_res(dev, slave, id, RES_MPT);
2355
2356 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2357 if (err)
2358 break;
2359 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2360 __mlx4_mpt_release(dev, index);
2361 break;
2362 case RES_OP_MAP_ICM:
2363 index = get_param_l(&in_param);
2364 id = index & mpt_mask(dev);
2365 err = mr_res_start_move_to(dev, slave, id,
2366 RES_MPT_RESERVED, &mpt);
2367 if (err)
2368 return err;
2369
2370 __mlx4_mpt_free_icm(dev, mpt->key);
2371 res_end_move(dev, slave, RES_MPT, id);
2372 return err;
2373 break;
2374 default:
2375 err = -EINVAL;
2376 break;
2377 }
2378 return err;
2379 }
2380
2381 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2382 u64 in_param, u64 *out_param)
2383 {
2384 int cqn;
2385 int err;
2386
2387 switch (op) {
2388 case RES_OP_RESERVE_AND_MAP:
2389 cqn = get_param_l(&in_param);
2390 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2391 if (err)
2392 break;
2393
2394 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2395 __mlx4_cq_free_icm(dev, cqn);
2396 break;
2397
2398 default:
2399 err = -EINVAL;
2400 break;
2401 }
2402
2403 return err;
2404 }
2405
2406 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2407 u64 in_param, u64 *out_param)
2408 {
2409 int srqn;
2410 int err;
2411
2412 switch (op) {
2413 case RES_OP_RESERVE_AND_MAP:
2414 srqn = get_param_l(&in_param);
2415 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2416 if (err)
2417 break;
2418
2419 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2420 __mlx4_srq_free_icm(dev, srqn);
2421 break;
2422
2423 default:
2424 err = -EINVAL;
2425 break;
2426 }
2427
2428 return err;
2429 }
2430
2431 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2432 u64 in_param, u64 *out_param, int in_port)
2433 {
2434 int port;
2435 int err = 0;
2436
2437 switch (op) {
2438 case RES_OP_RESERVE_AND_MAP:
2439 port = !in_port ? get_param_l(out_param) : in_port;
2440 port = mlx4_slave_convert_port(
2441 dev, slave, port);
2442
2443 if (port < 0)
2444 return -EINVAL;
2445 mac_del_from_slave(dev, slave, in_param, port);
2446 __mlx4_unregister_mac(dev, port, in_param);
2447 break;
2448 default:
2449 err = -EINVAL;
2450 break;
2451 }
2452
2453 return err;
2454
2455 }
2456
2457 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2458 u64 in_param, u64 *out_param, int port)
2459 {
2460 struct mlx4_priv *priv = mlx4_priv(dev);
2461 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2462 int err = 0;
2463
2464 port = mlx4_slave_convert_port(
2465 dev, slave, port);
2466
2467 if (port < 0)
2468 return -EINVAL;
2469 switch (op) {
2470 case RES_OP_RESERVE_AND_MAP:
2471 if (slave_state[slave].old_vlan_api)
2472 return 0;
2473 if (!port)
2474 return -EINVAL;
2475 vlan_del_from_slave(dev, slave, in_param, port);
2476 __mlx4_unregister_vlan(dev, port, in_param);
2477 break;
2478 default:
2479 err = -EINVAL;
2480 break;
2481 }
2482
2483 return err;
2484 }
2485
2486 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2487 u64 in_param, u64 *out_param)
2488 {
2489 int index;
2490 int err;
2491
2492 if (op != RES_OP_RESERVE)
2493 return -EINVAL;
2494
2495 index = get_param_l(&in_param);
2496 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2497 return 0;
2498
2499 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2500 if (err)
2501 return err;
2502
2503 __mlx4_counter_free(dev, index);
2504 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2505
2506 return err;
2507 }
2508
2509 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2510 u64 in_param, u64 *out_param)
2511 {
2512 int xrcdn;
2513 int err;
2514
2515 if (op != RES_OP_RESERVE)
2516 return -EINVAL;
2517
2518 xrcdn = get_param_l(&in_param);
2519 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2520 if (err)
2521 return err;
2522
2523 __mlx4_xrcd_free(dev, xrcdn);
2524
2525 return err;
2526 }
2527
2528 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2529 struct mlx4_vhcr *vhcr,
2530 struct mlx4_cmd_mailbox *inbox,
2531 struct mlx4_cmd_mailbox *outbox,
2532 struct mlx4_cmd_info *cmd)
2533 {
2534 int err = -EINVAL;
2535 int alop = vhcr->op_modifier;
2536
2537 switch (vhcr->in_modifier & 0xFF) {
2538 case RES_QP:
2539 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2540 vhcr->in_param);
2541 break;
2542
2543 case RES_MTT:
2544 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2545 vhcr->in_param, &vhcr->out_param);
2546 break;
2547
2548 case RES_MPT:
2549 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2550 vhcr->in_param);
2551 break;
2552
2553 case RES_CQ:
2554 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2555 vhcr->in_param, &vhcr->out_param);
2556 break;
2557
2558 case RES_SRQ:
2559 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2560 vhcr->in_param, &vhcr->out_param);
2561 break;
2562
2563 case RES_MAC:
2564 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2565 vhcr->in_param, &vhcr->out_param,
2566 (vhcr->in_modifier >> 8) & 0xFF);
2567 break;
2568
2569 case RES_VLAN:
2570 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2571 vhcr->in_param, &vhcr->out_param,
2572 (vhcr->in_modifier >> 8) & 0xFF);
2573 break;
2574
2575 case RES_COUNTER:
2576 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2577 vhcr->in_param, &vhcr->out_param);
2578 break;
2579
2580 case RES_XRCD:
2581 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2582 vhcr->in_param, &vhcr->out_param);
2583 		break;
2584 default:
2585 break;
2586 }
2587 return err;
2588 }
2589
2590 /* ugly but other choices are uglier */
2591 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2592 {
2593 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2594 }
2595
2596 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2597 {
2598 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2599 }
2600
2601 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2602 {
2603 return be32_to_cpu(mpt->mtt_sz);
2604 }
2605
2606 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2607 {
2608 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2609 }
2610
2611 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2612 {
2613 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2614 }
2615
2616 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2617 {
2618 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2619 }
2620
2621 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2622 {
2623 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2624 }
2625
2626 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2627 {
2628 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2629 }
2630
2631 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2632 {
2633 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2634 }
2635
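/*
 * Number of MTT pages needed to back a QP's work queues, computed from
 * the log sizes/strides in the QP context; the receive queue contributes
 * nothing when the QP uses an SRQ, RSS or XRC.  As an illustration only:
 * log_sq_size = 6, log_sq_stride = 2, page_shift = 12 and page_offset = 0
 * give sq_size = 1 << (6 + 2 + 4) = 4096 bytes, i.e. a single 4K page
 * when the RQ is absent.
 */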
2636 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2637 {
2638 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2639 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2640 	int log_sq_stride = qpc->sq_size_stride & 7;
2641 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2642 int log_rq_stride = qpc->rq_size_stride & 7;
2643 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2644 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2645 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2646 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2647 int sq_size;
2648 int rq_size;
2649 int total_pages;
2650 int total_mem;
2651 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2652
2653 	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2654 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2655 total_mem = sq_size + rq_size;
2656 total_pages =
2657 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2658 page_shift);
2659
2660 return total_pages;
2661 }
2662
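/*
 * A command may only reference MTT entries inside the chunk reserved for
 * the slave: [start, start + size) must lie within
 * [mtt->com.res_id, mtt->com.res_id + (1 << mtt->order)).
 */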
2663 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2664 int size, struct res_mtt *mtt)
2665 {
2666 int res_start = mtt->com.res_id;
2667 int res_size = (1 << mtt->order);
2668
2669 if (start < res_start || start + size > res_start + res_size)
2670 return -EPERM;
2671 return 0;
2672 }
2673
2674 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2675 struct mlx4_vhcr *vhcr,
2676 struct mlx4_cmd_mailbox *inbox,
2677 struct mlx4_cmd_mailbox *outbox,
2678 struct mlx4_cmd_info *cmd)
2679 {
2680 int err;
2681 int index = vhcr->in_modifier;
2682 struct res_mtt *mtt;
2683 struct res_mpt *mpt;
2684 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2685 int phys;
2686 int id;
2687 u32 pd;
2688 int pd_slave;
2689
2690 id = index & mpt_mask(dev);
2691 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2692 if (err)
2693 return err;
2694
2695 /* Disable memory windows for VFs. */
2696 if (!mr_is_region(inbox->buf)) {
2697 err = -EPERM;
2698 goto ex_abort;
2699 }
2700
2701 	/* Make sure the PD's slave-id bits are either clear or refer to this slave. */
2702 pd = mr_get_pd(inbox->buf);
2703 pd_slave = (pd >> 17) & 0x7f;
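	/* Bits 23:17 of the PD appear to encode the allocating function
	 * number plus one (zero meaning none); reject MPTs whose PD was
	 * allocated by a different function.
	 */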
2704 if (pd_slave != 0 && --pd_slave != slave) {
2705 err = -EPERM;
2706 goto ex_abort;
2707 }
2708
2709 if (mr_is_fmr(inbox->buf)) {
2710 /* FMR and Bind Enable are forbidden in slave devices. */
2711 if (mr_is_bind_enabled(inbox->buf)) {
2712 err = -EPERM;
2713 goto ex_abort;
2714 }
2715 /* FMR and Memory Windows are also forbidden. */
2716 if (!mr_is_region(inbox->buf)) {
2717 err = -EPERM;
2718 goto ex_abort;
2719 }
2720 }
2721
2722 phys = mr_phys_mpt(inbox->buf);
2723 if (!phys) {
2724 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2725 if (err)
2726 goto ex_abort;
2727
2728 err = check_mtt_range(dev, slave, mtt_base,
2729 mr_get_mtt_size(inbox->buf), mtt);
2730 if (err)
2731 goto ex_put;
2732
2733 mpt->mtt = mtt;
2734 }
2735
2736 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2737 if (err)
2738 goto ex_put;
2739
2740 if (!phys) {
2741 atomic_inc(&mtt->ref_count);
2742 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2743 }
2744
2745 res_end_move(dev, slave, RES_MPT, id);
2746 return 0;
2747
2748 ex_put:
2749 if (!phys)
2750 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2751 ex_abort:
2752 res_abort_move(dev, slave, RES_MPT, id);
2753
2754 return err;
2755 }
2756
2757 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2758 struct mlx4_vhcr *vhcr,
2759 struct mlx4_cmd_mailbox *inbox,
2760 struct mlx4_cmd_mailbox *outbox,
2761 struct mlx4_cmd_info *cmd)
2762 {
2763 int err;
2764 int index = vhcr->in_modifier;
2765 struct res_mpt *mpt;
2766 int id;
2767
2768 id = index & mpt_mask(dev);
2769 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2770 if (err)
2771 return err;
2772
2773 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2774 if (err)
2775 goto ex_abort;
2776
2777 if (mpt->mtt)
2778 atomic_dec(&mpt->mtt->ref_count);
2779
2780 res_end_move(dev, slave, RES_MPT, id);
2781 return 0;
2782
2783 ex_abort:
2784 res_abort_move(dev, slave, RES_MPT, id);
2785
2786 return err;
2787 }
2788
2789 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2790 struct mlx4_vhcr *vhcr,
2791 struct mlx4_cmd_mailbox *inbox,
2792 struct mlx4_cmd_mailbox *outbox,
2793 struct mlx4_cmd_info *cmd)
2794 {
2795 int err;
2796 int index = vhcr->in_modifier;
2797 struct res_mpt *mpt;
2798 int id;
2799
2800 id = index & mpt_mask(dev);
2801 err = get_res(dev, slave, id, RES_MPT, &mpt);
2802 if (err)
2803 return err;
2804
2805 if (mpt->com.from_state == RES_MPT_MAPPED) {
2806 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2807 * that, the VF must read the MPT. But since the MPT entry memory is not
2808 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2809 * entry contents. To guarantee that the MPT cannot be changed, the driver
2810 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2811 		 * ownership following the change. The change here allows the VF to
2812 * perform QUERY_MPT also when the entry is in SW ownership.
2813 */
2814 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2815 &mlx4_priv(dev)->mr_table.dmpt_table,
2816 mpt->key, NULL);
2817
2818 		if (!mpt_entry || !outbox->buf) {
2819 err = -EINVAL;
2820 goto out;
2821 }
2822
2823 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2824
2825 err = 0;
2826 } else if (mpt->com.from_state == RES_MPT_HW) {
2827 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2828 } else {
2829 err = -EBUSY;
2830 goto out;
2831 }
2832
2833
2834 out:
2835 put_res(dev, slave, id, RES_MPT);
2836 return err;
2837 }
2838
2839 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2840 {
2841 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2842 }
2843
2844 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2845 {
2846 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2847 }
2848
2849 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2850 {
2851 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2852 }
2853
2854 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2855 struct mlx4_qp_context *context)
2856 {
2857 u32 qpn = vhcr->in_modifier & 0xffffff;
2858 u32 qkey = 0;
2859
2860 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2861 return;
2862
2863 /* adjust qkey in qp context */
2864 context->qkey = cpu_to_be32(qkey);
2865 }
2866
2867 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2868 struct mlx4_qp_context *qpc,
2869 struct mlx4_cmd_mailbox *inbox);
2870
2871 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2872 struct mlx4_vhcr *vhcr,
2873 struct mlx4_cmd_mailbox *inbox,
2874 struct mlx4_cmd_mailbox *outbox,
2875 struct mlx4_cmd_info *cmd)
2876 {
2877 int err;
2878 int qpn = vhcr->in_modifier & 0x7fffff;
2879 struct res_mtt *mtt;
2880 struct res_qp *qp;
2881 struct mlx4_qp_context *qpc = inbox->buf + 8;
2882 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2883 int mtt_size = qp_get_mtt_size(qpc);
2884 struct res_cq *rcq;
2885 struct res_cq *scq;
2886 int rcqn = qp_get_rcqn(qpc);
2887 int scqn = qp_get_scqn(qpc);
2888 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2889 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2890 struct res_srq *srq;
2891 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2892
2893 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2894 if (err)
2895 return err;
2896
2897 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2898 if (err)
2899 return err;
2900 qp->local_qpn = local_qpn;
2901 qp->sched_queue = 0;
2902 qp->param3 = 0;
2903 qp->vlan_control = 0;
2904 qp->fvl_rx = 0;
2905 qp->pri_path_fl = 0;
2906 qp->vlan_index = 0;
2907 qp->feup = 0;
2908 qp->qpc_flags = be32_to_cpu(qpc->flags);
2909
2910 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2911 if (err)
2912 goto ex_abort;
2913
2914 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2915 if (err)
2916 goto ex_put_mtt;
2917
2918 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2919 if (err)
2920 goto ex_put_mtt;
2921
2922 if (scqn != rcqn) {
2923 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2924 if (err)
2925 goto ex_put_rcq;
2926 } else
2927 scq = rcq;
2928
2929 if (use_srq) {
2930 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2931 if (err)
2932 goto ex_put_scq;
2933 }
2934
2935 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2936 update_pkey_index(dev, slave, inbox);
2937 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2938 if (err)
2939 goto ex_put_srq;
2940 atomic_inc(&mtt->ref_count);
2941 qp->mtt = mtt;
2942 atomic_inc(&rcq->ref_count);
2943 qp->rcq = rcq;
2944 atomic_inc(&scq->ref_count);
2945 qp->scq = scq;
2946
2947 if (scqn != rcqn)
2948 put_res(dev, slave, scqn, RES_CQ);
2949
2950 if (use_srq) {
2951 atomic_inc(&srq->ref_count);
2952 put_res(dev, slave, srqn, RES_SRQ);
2953 qp->srq = srq;
2954 }
2955 put_res(dev, slave, rcqn, RES_CQ);
2956 put_res(dev, slave, mtt_base, RES_MTT);
2957 res_end_move(dev, slave, RES_QP, qpn);
2958
2959 return 0;
2960
2961 ex_put_srq:
2962 if (use_srq)
2963 put_res(dev, slave, srqn, RES_SRQ);
2964 ex_put_scq:
2965 if (scqn != rcqn)
2966 put_res(dev, slave, scqn, RES_CQ);
2967 ex_put_rcq:
2968 put_res(dev, slave, rcqn, RES_CQ);
2969 ex_put_mtt:
2970 put_res(dev, slave, mtt_base, RES_MTT);
2971 ex_abort:
2972 res_abort_move(dev, slave, RES_QP, qpn);
2973
2974 return err;
2975 }
2976
2977 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2978 {
2979 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2980 }
2981
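/*
 * MTT entries needed for an EQ: the queue occupies 1 << (log_eq_size + 5)
 * bytes (each EQE is 32 bytes, hence the "+ 5"), rounded up to at least
 * one page of 1 << page_shift bytes.
 */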
2982 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2983 {
2984 int log_eq_size = eqc->log_eq_size & 0x1f;
2985 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2986
2987 if (log_eq_size + 5 < page_shift)
2988 return 1;
2989
2990 return 1 << (log_eq_size + 5 - page_shift);
2991 }
2992
2993 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2994 {
2995 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2996 }
2997
2998 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2999 {
3000 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3001 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3002
3003 if (log_cq_size + 5 < page_shift)
3004 return 1;
3005
3006 return 1 << (log_cq_size + 5 - page_shift);
3007 }
3008
3009 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3010 struct mlx4_vhcr *vhcr,
3011 struct mlx4_cmd_mailbox *inbox,
3012 struct mlx4_cmd_mailbox *outbox,
3013 struct mlx4_cmd_info *cmd)
3014 {
3015 int err;
3016 int eqn = vhcr->in_modifier;
3017 int res_id = (slave << 10) | eqn;
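	/* EQ numbers are only unique per function, so the tracker keys EQs
	 * by (slave << 10) | eqn to keep different slaves' EQs apart.
	 */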
3018 struct mlx4_eq_context *eqc = inbox->buf;
3019 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3020 int mtt_size = eq_get_mtt_size(eqc);
3021 struct res_eq *eq;
3022 struct res_mtt *mtt;
3023
3024 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3025 if (err)
3026 return err;
3027 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3028 if (err)
3029 goto out_add;
3030
3031 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3032 if (err)
3033 goto out_move;
3034
3035 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3036 if (err)
3037 goto out_put;
3038
3039 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3040 if (err)
3041 goto out_put;
3042
3043 atomic_inc(&mtt->ref_count);
3044 eq->mtt = mtt;
3045 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3046 res_end_move(dev, slave, RES_EQ, res_id);
3047 return 0;
3048
3049 out_put:
3050 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3051 out_move:
3052 res_abort_move(dev, slave, RES_EQ, res_id);
3053 out_add:
3054 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3055 return err;
3056 }
3057
3058 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3059 struct mlx4_vhcr *vhcr,
3060 struct mlx4_cmd_mailbox *inbox,
3061 struct mlx4_cmd_mailbox *outbox,
3062 struct mlx4_cmd_info *cmd)
3063 {
3064 int err;
3065 u8 get = vhcr->op_modifier;
3066
3067 if (get != 1)
3068 return -EPERM;
3069
3070 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3071
3072 return err;
3073 }
3074
3075 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3076 int len, struct res_mtt **res)
3077 {
3078 struct mlx4_priv *priv = mlx4_priv(dev);
3079 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3080 struct res_mtt *mtt;
3081 int err = -EINVAL;
3082
3083 spin_lock_irq(mlx4_tlock(dev));
3084 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3085 com.list) {
3086 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3087 *res = mtt;
3088 mtt->com.from_state = mtt->com.state;
3089 mtt->com.state = RES_MTT_BUSY;
3090 err = 0;
3091 break;
3092 }
3093 }
3094 spin_unlock_irq(mlx4_tlock(dev));
3095
3096 return err;
3097 }
3098
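/*
 * Sanity checks applied to QP transitions requested by a slave: the FPP
 * bit is cleared and rate limiting is rejected for VFs, the MGID index in
 * the primary/alternate path must fall within the GID range assigned to
 * the slave, and only VFs with SMI enabled may bring MLX (special) proxy
 * QPs to RTR.
 */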
3099 static int verify_qp_parameters(struct mlx4_dev *dev,
3100 struct mlx4_vhcr *vhcr,
3101 struct mlx4_cmd_mailbox *inbox,
3102 enum qp_transition transition, u8 slave)
3103 {
3104 u32 qp_type;
3105 u32 qpn;
3106 struct mlx4_qp_context *qp_ctx;
3107 enum mlx4_qp_optpar optpar;
3108 int port;
3109 int num_gids;
3110
3111 qp_ctx = inbox->buf + 8;
3112 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3113 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3114
3115 if (slave != mlx4_master_func_num(dev)) {
3116 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3117 /* setting QP rate-limit is disallowed for VFs */
3118 if (qp_ctx->rate_limit_params)
3119 return -EPERM;
3120 }
3121
3122 switch (qp_type) {
3123 case MLX4_QP_ST_RC:
3124 case MLX4_QP_ST_XRC:
3125 case MLX4_QP_ST_UC:
3126 switch (transition) {
3127 case QP_TRANS_INIT2RTR:
3128 case QP_TRANS_RTR2RTS:
3129 case QP_TRANS_RTS2RTS:
3130 case QP_TRANS_SQD2SQD:
3131 case QP_TRANS_SQD2RTS:
3132 if (slave != mlx4_master_func_num(dev))
3133 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3134 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3135 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3136 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3137 else
3138 num_gids = 1;
3139 if (qp_ctx->pri_path.mgid_index >= num_gids)
3140 return -EINVAL;
3141 }
3142 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3143 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3144 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3145 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3146 else
3147 num_gids = 1;
3148 if (qp_ctx->alt_path.mgid_index >= num_gids)
3149 return -EINVAL;
3150 }
3151 break;
3152 default:
3153 break;
3154 }
3155 break;
3156
3157 case MLX4_QP_ST_MLX:
3158 qpn = vhcr->in_modifier & 0x7fffff;
3159 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3160 if (transition == QP_TRANS_INIT2RTR &&
3161 slave != mlx4_master_func_num(dev) &&
3162 mlx4_is_qp_reserved(dev, qpn) &&
3163 !mlx4_vf_smi_enabled(dev, slave, port)) {
3164 			/* only VFs with SMI enabled may create MLX proxy QPs */
3165 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3166 __func__, slave, port);
3167 return -EPERM;
3168 }
3169 break;
3170
3171 default:
3172 break;
3173 }
3174
3175 return 0;
3176 }
3177
3178 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3179 struct mlx4_vhcr *vhcr,
3180 struct mlx4_cmd_mailbox *inbox,
3181 struct mlx4_cmd_mailbox *outbox,
3182 struct mlx4_cmd_info *cmd)
3183 {
3184 struct mlx4_mtt mtt;
3185 __be64 *page_list = inbox->buf;
3186 u64 *pg_list = (u64 *)page_list;
3187 int i;
3188 struct res_mtt *rmtt = NULL;
3189 int start = be64_to_cpu(page_list[0]);
3190 int npages = vhcr->in_modifier;
3191 int err;
3192
3193 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3194 if (err)
3195 return err;
3196
3197 /* Call the SW implementation of write_mtt:
3198 * - Prepare a dummy mtt struct
3199 * - Translate inbox contents to simple addresses in host endianness */
3200 	mtt.offset = 0;	/* TBD: the offset is not handled correctly, but it
3201 			   is not actually used here, so leave it at zero */
3202 mtt.order = 0;
3203 mtt.page_shift = 0;
3204 for (i = 0; i < npages; ++i)
3205 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3206
3207 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3208 ((u64 *)page_list + 2));
3209
3210 if (rmtt)
3211 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3212
3213 return err;
3214 }
3215
3216 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3217 struct mlx4_vhcr *vhcr,
3218 struct mlx4_cmd_mailbox *inbox,
3219 struct mlx4_cmd_mailbox *outbox,
3220 struct mlx4_cmd_info *cmd)
3221 {
3222 int eqn = vhcr->in_modifier;
3223 int res_id = eqn | (slave << 10);
3224 struct res_eq *eq;
3225 int err;
3226
3227 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3228 if (err)
3229 return err;
3230
3231 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3232 if (err)
3233 goto ex_abort;
3234
3235 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3236 if (err)
3237 goto ex_put;
3238
3239 atomic_dec(&eq->mtt->ref_count);
3240 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3241 res_end_move(dev, slave, RES_EQ, res_id);
3242 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3243
3244 return 0;
3245
3246 ex_put:
3247 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3248 ex_abort:
3249 res_abort_move(dev, slave, RES_EQ, res_id);
3250
3251 return err;
3252 }
3253
3254 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3255 {
3256 struct mlx4_priv *priv = mlx4_priv(dev);
3257 struct mlx4_slave_event_eq_info *event_eq;
3258 struct mlx4_cmd_mailbox *mailbox;
3259 u32 in_modifier = 0;
3260 int err;
3261 int res_id;
3262 struct res_eq *req;
3263
3264 if (!priv->mfunc.master.slave_state)
3265 return -EINVAL;
3266
3267 	/* silently ignore the event if the slave id is invalid, refers to the PF itself, or the slave is not active */
3268 if (slave < 0 || slave > dev->persist->num_vfs ||
3269 slave == dev->caps.function ||
3270 !priv->mfunc.master.slave_state[slave].active)
3271 return 0;
3272
3273 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3274
3275 /* Create the event only if the slave is registered */
3276 if (event_eq->eqn < 0)
3277 return 0;
3278
3279 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3280 res_id = (slave << 10) | event_eq->eqn;
3281 err = get_res(dev, slave, res_id, RES_EQ, &req);
3282 if (err)
3283 goto unlock;
3284
3285 if (req->com.from_state != RES_EQ_HW) {
3286 err = -EINVAL;
3287 goto put;
3288 }
3289
3290 mailbox = mlx4_alloc_cmd_mailbox(dev);
3291 if (IS_ERR(mailbox)) {
3292 err = PTR_ERR(mailbox);
3293 goto put;
3294 }
3295
3296 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3297 ++event_eq->token;
3298 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3299 }
3300
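	/* Copy only the first 28 bytes of the 32-byte EQE, presumably so
	 * the reserved/ownership bytes at the tail are left for the
	 * target EQ to manage.
	 */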
3301 memcpy(mailbox->buf, (u8 *) eqe, 28);
3302
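	/* GEN_EQE input modifier: target slave in the low byte, the
	 * slave's EQN in bits 25:16.
	 */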
3303 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3304
3305 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3306 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3307 MLX4_CMD_NATIVE);
3308
3309 put_res(dev, slave, res_id, RES_EQ);
3310 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3311 mlx4_free_cmd_mailbox(dev, mailbox);
3312 return err;
3313
3314 put:
3315 put_res(dev, slave, res_id, RES_EQ);
3316
3317 unlock:
3318 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3319 return err;
3320 }
3321
3322 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3323 struct mlx4_vhcr *vhcr,
3324 struct mlx4_cmd_mailbox *inbox,
3325 struct mlx4_cmd_mailbox *outbox,
3326 struct mlx4_cmd_info *cmd)
3327 {
3328 int eqn = vhcr->in_modifier;
3329 int res_id = eqn | (slave << 10);
3330 struct res_eq *eq;
3331 int err;
3332
3333 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3334 if (err)
3335 return err;
3336
3337 if (eq->com.from_state != RES_EQ_HW) {
3338 err = -EINVAL;
3339 goto ex_put;
3340 }
3341
3342 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3343
3344 ex_put:
3345 put_res(dev, slave, res_id, RES_EQ);
3346 return err;
3347 }
3348
3349 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3350 struct mlx4_vhcr *vhcr,
3351 struct mlx4_cmd_mailbox *inbox,
3352 struct mlx4_cmd_mailbox *outbox,
3353 struct mlx4_cmd_info *cmd)
3354 {
3355 int err;
3356 int cqn = vhcr->in_modifier;
3357 struct mlx4_cq_context *cqc = inbox->buf;
3358 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3359 struct res_cq *cq = NULL;
3360 struct res_mtt *mtt;
3361
3362 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3363 if (err)
3364 return err;
3365 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3366 if (err)
3367 goto out_move;
3368 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3369 if (err)
3370 goto out_put;
3371 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3372 if (err)
3373 goto out_put;
3374 atomic_inc(&mtt->ref_count);
3375 cq->mtt = mtt;
3376 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3377 res_end_move(dev, slave, RES_CQ, cqn);
3378 return 0;
3379
3380 out_put:
3381 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3382 out_move:
3383 res_abort_move(dev, slave, RES_CQ, cqn);
3384 return err;
3385 }
3386
3387 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3388 struct mlx4_vhcr *vhcr,
3389 struct mlx4_cmd_mailbox *inbox,
3390 struct mlx4_cmd_mailbox *outbox,
3391 struct mlx4_cmd_info *cmd)
3392 {
3393 int err;
3394 int cqn = vhcr->in_modifier;
3395 struct res_cq *cq = NULL;
3396
3397 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3398 if (err)
3399 return err;
3400 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3401 if (err)
3402 goto out_move;
3403 atomic_dec(&cq->mtt->ref_count);
3404 res_end_move(dev, slave, RES_CQ, cqn);
3405 return 0;
3406
3407 out_move:
3408 res_abort_move(dev, slave, RES_CQ, cqn);
3409 return err;
3410 }
3411
3412 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3413 struct mlx4_vhcr *vhcr,
3414 struct mlx4_cmd_mailbox *inbox,
3415 struct mlx4_cmd_mailbox *outbox,
3416 struct mlx4_cmd_info *cmd)
3417 {
3418 int cqn = vhcr->in_modifier;
3419 struct res_cq *cq;
3420 int err;
3421
3422 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3423 if (err)
3424 return err;
3425
3426 if (cq->com.from_state != RES_CQ_HW)
3427 goto ex_put;
3428
3429 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3430 ex_put:
3431 put_res(dev, slave, cqn, RES_CQ);
3432
3433 return err;
3434 }
3435
3436 static int handle_resize(struct mlx4_dev *dev, int slave,
3437 struct mlx4_vhcr *vhcr,
3438 struct mlx4_cmd_mailbox *inbox,
3439 struct mlx4_cmd_mailbox *outbox,
3440 struct mlx4_cmd_info *cmd,
3441 struct res_cq *cq)
3442 {
3443 int err;
3444 struct res_mtt *orig_mtt;
3445 struct res_mtt *mtt;
3446 struct mlx4_cq_context *cqc = inbox->buf;
3447 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3448
3449 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3450 if (err)
3451 return err;
3452
3453 if (orig_mtt != cq->mtt) {
3454 err = -EINVAL;
3455 goto ex_put;
3456 }
3457
3458 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3459 if (err)
3460 goto ex_put;
3461
3462 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3463 if (err)
3464 goto ex_put1;
3465 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3466 if (err)
3467 goto ex_put1;
3468 atomic_dec(&orig_mtt->ref_count);
3469 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3470 atomic_inc(&mtt->ref_count);
3471 cq->mtt = mtt;
3472 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3473 return 0;
3474
3475 ex_put1:
3476 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3477 ex_put:
3478 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3479
3480 return err;
3481
3482 }
3483
3484 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3485 struct mlx4_vhcr *vhcr,
3486 struct mlx4_cmd_mailbox *inbox,
3487 struct mlx4_cmd_mailbox *outbox,
3488 struct mlx4_cmd_info *cmd)
3489 {
3490 int cqn = vhcr->in_modifier;
3491 struct res_cq *cq;
3492 int err;
3493
3494 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3495 if (err)
3496 return err;
3497
3498 if (cq->com.from_state != RES_CQ_HW)
3499 goto ex_put;
3500
3501 if (vhcr->op_modifier == 0) {
3502 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3503 goto ex_put;
3504 }
3505
3506 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3507 ex_put:
3508 put_res(dev, slave, cqn, RES_CQ);
3509
3510 return err;
3511 }
3512
3513 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3514 {
3515 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3516 int log_rq_stride = srqc->logstride & 7;
3517 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3518
3519 if (log_srq_size + log_rq_stride + 4 < page_shift)
3520 return 1;
3521
3522 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3523 }
3524
3525 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3526 struct mlx4_vhcr *vhcr,
3527 struct mlx4_cmd_mailbox *inbox,
3528 struct mlx4_cmd_mailbox *outbox,
3529 struct mlx4_cmd_info *cmd)
3530 {
3531 int err;
3532 int srqn = vhcr->in_modifier;
3533 struct res_mtt *mtt;
3534 struct res_srq *srq = NULL;
3535 struct mlx4_srq_context *srqc = inbox->buf;
3536 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3537
3538 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3539 return -EINVAL;
3540
3541 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3542 if (err)
3543 return err;
3544 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3545 if (err)
3546 goto ex_abort;
3547 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3548 mtt);
3549 if (err)
3550 goto ex_put_mtt;
3551
3552 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3553 if (err)
3554 goto ex_put_mtt;
3555
3556 atomic_inc(&mtt->ref_count);
3557 srq->mtt = mtt;
3558 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3559 res_end_move(dev, slave, RES_SRQ, srqn);
3560 return 0;
3561
3562 ex_put_mtt:
3563 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3564 ex_abort:
3565 res_abort_move(dev, slave, RES_SRQ, srqn);
3566
3567 return err;
3568 }
3569
3570 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3571 struct mlx4_vhcr *vhcr,
3572 struct mlx4_cmd_mailbox *inbox,
3573 struct mlx4_cmd_mailbox *outbox,
3574 struct mlx4_cmd_info *cmd)
3575 {
3576 int err;
3577 int srqn = vhcr->in_modifier;
3578 struct res_srq *srq = NULL;
3579
3580 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3581 if (err)
3582 return err;
3583 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3584 if (err)
3585 goto ex_abort;
3586 atomic_dec(&srq->mtt->ref_count);
3587 if (srq->cq)
3588 atomic_dec(&srq->cq->ref_count);
3589 res_end_move(dev, slave, RES_SRQ, srqn);
3590
3591 return 0;
3592
3593 ex_abort:
3594 res_abort_move(dev, slave, RES_SRQ, srqn);
3595
3596 return err;
3597 }
3598
3599 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3600 struct mlx4_vhcr *vhcr,
3601 struct mlx4_cmd_mailbox *inbox,
3602 struct mlx4_cmd_mailbox *outbox,
3603 struct mlx4_cmd_info *cmd)
3604 {
3605 int err;
3606 int srqn = vhcr->in_modifier;
3607 struct res_srq *srq;
3608
3609 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3610 if (err)
3611 return err;
3612 if (srq->com.from_state != RES_SRQ_HW) {
3613 err = -EBUSY;
3614 goto out;
3615 }
3616 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3617 out:
3618 put_res(dev, slave, srqn, RES_SRQ);
3619 return err;
3620 }
3621
3622 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3623 struct mlx4_vhcr *vhcr,
3624 struct mlx4_cmd_mailbox *inbox,
3625 struct mlx4_cmd_mailbox *outbox,
3626 struct mlx4_cmd_info *cmd)
3627 {
3628 int err;
3629 int srqn = vhcr->in_modifier;
3630 struct res_srq *srq;
3631
3632 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3633 if (err)
3634 return err;
3635
3636 if (srq->com.from_state != RES_SRQ_HW) {
3637 err = -EBUSY;
3638 goto out;
3639 }
3640
3641 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3642 out:
3643 put_res(dev, slave, srqn, RES_SRQ);
3644 return err;
3645 }
3646
3647 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3648 struct mlx4_vhcr *vhcr,
3649 struct mlx4_cmd_mailbox *inbox,
3650 struct mlx4_cmd_mailbox *outbox,
3651 struct mlx4_cmd_info *cmd)
3652 {
3653 int err;
3654 int qpn = vhcr->in_modifier & 0x7fffff;
3655 struct res_qp *qp;
3656
3657 err = get_res(dev, slave, qpn, RES_QP, &qp);
3658 if (err)
3659 return err;
3660 if (qp->com.from_state != RES_QP_HW) {
3661 err = -EBUSY;
3662 goto out;
3663 }
3664
3665 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3666 out:
3667 put_res(dev, slave, qpn, RES_QP);
3668 return err;
3669 }
3670
3671 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3672 struct mlx4_vhcr *vhcr,
3673 struct mlx4_cmd_mailbox *inbox,
3674 struct mlx4_cmd_mailbox *outbox,
3675 struct mlx4_cmd_info *cmd)
3676 {
3677 struct mlx4_qp_context *context = inbox->buf + 8;
3678 adjust_proxy_tun_qkey(dev, vhcr, context);
3679 update_pkey_index(dev, slave, inbox);
3680 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3681 }
3682
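/*
 * Bit 6 of sched_queue selects the physical port.  VFs address ports by
 * their virtual numbering, so rewrite that bit (for the primary path and,
 * when the optpar requests it, the alternate path) with the physical port
 * returned by mlx4_slave_convert_port().
 */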
3683 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3684 struct mlx4_qp_context *qpc,
3685 struct mlx4_cmd_mailbox *inbox)
3686 {
3687 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3688 u8 pri_sched_queue;
3689 int port = mlx4_slave_convert_port(
3690 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3691
3692 if (port < 0)
3693 return -EINVAL;
3694
3695 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3696 ((port & 1) << 6);
3697
3698 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3699 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3700 qpc->pri_path.sched_queue = pri_sched_queue;
3701 }
3702
3703 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3704 port = mlx4_slave_convert_port(
3705 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3706 + 1) - 1;
3707 if (port < 0)
3708 return -EINVAL;
3709 qpc->alt_path.sched_queue =
3710 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3711 (port & 1) << 6;
3712 }
3713 return 0;
3714 }
3715
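/*
 * For Ethernet (non-MLX) QPs the source-MAC index in the QP context must
 * refer to a MAC registered to this slave; otherwise a VF could
 * presumably transmit with a source MAC it does not own.
 */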
3716 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3717 struct mlx4_qp_context *qpc,
3718 struct mlx4_cmd_mailbox *inbox)
3719 {
3720 u64 mac;
3721 int port;
3722 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3723 u8 sched = *(u8 *)(inbox->buf + 64);
3724 u8 smac_ix;
3725
3726 port = (sched >> 6 & 1) + 1;
3727 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3728 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3729 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3730 return -ENOENT;
3731 }
3732 return 0;
3733 }
3734
3735 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3736 struct mlx4_vhcr *vhcr,
3737 struct mlx4_cmd_mailbox *inbox,
3738 struct mlx4_cmd_mailbox *outbox,
3739 struct mlx4_cmd_info *cmd)
3740 {
3741 int err;
3742 struct mlx4_qp_context *qpc = inbox->buf + 8;
3743 int qpn = vhcr->in_modifier & 0x7fffff;
3744 struct res_qp *qp;
3745 u8 orig_sched_queue;
3746 __be32 orig_param3 = qpc->param3;
3747 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3748 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3749 u8 orig_pri_path_fl = qpc->pri_path.fl;
3750 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3751 u8 orig_feup = qpc->pri_path.feup;
3752
3753 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3754 if (err)
3755 return err;
3756 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3757 if (err)
3758 return err;
3759
3760 if (roce_verify_mac(dev, slave, qpc, inbox))
3761 return -EINVAL;
3762
3763 update_pkey_index(dev, slave, inbox);
3764 update_gid(dev, inbox, (u8)slave);
3765 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3766 orig_sched_queue = qpc->pri_path.sched_queue;
3767 err = update_vport_qp_param(dev, inbox, slave, qpn);
3768 if (err)
3769 return err;
3770
3771 err = get_res(dev, slave, qpn, RES_QP, &qp);
3772 if (err)
3773 return err;
3774 if (qp->com.from_state != RES_QP_HW) {
3775 err = -EBUSY;
3776 goto out;
3777 }
3778
3779 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3780 out:
3781 	/* if no error, save the sched_queue value passed in by the VF. This is
3782 	 * essentially the QoS value provided by the VF and will be useful
3783 	 * if we allow dynamic changes from VST back to VGT
3784 */
3785 if (!err) {
3786 qp->sched_queue = orig_sched_queue;
3787 qp->param3 = orig_param3;
3788 qp->vlan_control = orig_vlan_control;
3789 qp->fvl_rx = orig_fvl_rx;
3790 qp->pri_path_fl = orig_pri_path_fl;
3791 qp->vlan_index = orig_vlan_index;
3792 qp->feup = orig_feup;
3793 }
3794 put_res(dev, slave, qpn, RES_QP);
3795 return err;
3796 }
3797
3798 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3799 struct mlx4_vhcr *vhcr,
3800 struct mlx4_cmd_mailbox *inbox,
3801 struct mlx4_cmd_mailbox *outbox,
3802 struct mlx4_cmd_info *cmd)
3803 {
3804 int err;
3805 struct mlx4_qp_context *context = inbox->buf + 8;
3806
3807 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3808 if (err)
3809 return err;
3810 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3811 if (err)
3812 return err;
3813
3814 update_pkey_index(dev, slave, inbox);
3815 update_gid(dev, inbox, (u8)slave);
3816 adjust_proxy_tun_qkey(dev, vhcr, context);
3817 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3818 }
3819
3820 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3821 struct mlx4_vhcr *vhcr,
3822 struct mlx4_cmd_mailbox *inbox,
3823 struct mlx4_cmd_mailbox *outbox,
3824 struct mlx4_cmd_info *cmd)
3825 {
3826 int err;
3827 struct mlx4_qp_context *context = inbox->buf + 8;
3828
3829 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3830 if (err)
3831 return err;
3832 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3833 if (err)
3834 return err;
3835
3836 update_pkey_index(dev, slave, inbox);
3837 update_gid(dev, inbox, (u8)slave);
3838 adjust_proxy_tun_qkey(dev, vhcr, context);
3839 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3840 }
3841
3842
3843 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3844 struct mlx4_vhcr *vhcr,
3845 struct mlx4_cmd_mailbox *inbox,
3846 struct mlx4_cmd_mailbox *outbox,
3847 struct mlx4_cmd_info *cmd)
3848 {
3849 struct mlx4_qp_context *context = inbox->buf + 8;
3850 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3851 if (err)
3852 return err;
3853 adjust_proxy_tun_qkey(dev, vhcr, context);
3854 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3855 }
3856
3857 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3858 struct mlx4_vhcr *vhcr,
3859 struct mlx4_cmd_mailbox *inbox,
3860 struct mlx4_cmd_mailbox *outbox,
3861 struct mlx4_cmd_info *cmd)
3862 {
3863 int err;
3864 struct mlx4_qp_context *context = inbox->buf + 8;
3865
3866 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3867 if (err)
3868 return err;
3869 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3870 if (err)
3871 return err;
3872
3873 adjust_proxy_tun_qkey(dev, vhcr, context);
3874 update_gid(dev, inbox, (u8)slave);
3875 update_pkey_index(dev, slave, inbox);
3876 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3877 }
3878
3879 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3880 struct mlx4_vhcr *vhcr,
3881 struct mlx4_cmd_mailbox *inbox,
3882 struct mlx4_cmd_mailbox *outbox,
3883 struct mlx4_cmd_info *cmd)
3884 {
3885 int err;
3886 struct mlx4_qp_context *context = inbox->buf + 8;
3887
3888 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3889 if (err)
3890 return err;
3891 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3892 if (err)
3893 return err;
3894
3895 adjust_proxy_tun_qkey(dev, vhcr, context);
3896 update_gid(dev, inbox, (u8)slave);
3897 update_pkey_index(dev, slave, inbox);
3898 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3899 }
3900
3901 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3902 struct mlx4_vhcr *vhcr,
3903 struct mlx4_cmd_mailbox *inbox,
3904 struct mlx4_cmd_mailbox *outbox,
3905 struct mlx4_cmd_info *cmd)
3906 {
3907 int err;
3908 int qpn = vhcr->in_modifier & 0x7fffff;
3909 struct res_qp *qp;
3910
3911 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3912 if (err)
3913 return err;
3914 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3915 if (err)
3916 goto ex_abort;
3917
3918 atomic_dec(&qp->mtt->ref_count);
3919 atomic_dec(&qp->rcq->ref_count);
3920 atomic_dec(&qp->scq->ref_count);
3921 if (qp->srq)
3922 atomic_dec(&qp->srq->ref_count);
3923 res_end_move(dev, slave, RES_QP, qpn);
3924 return 0;
3925
3926 ex_abort:
3927 res_abort_move(dev, slave, RES_QP, qpn);
3928
3929 return err;
3930 }
3931
3932 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3933 struct res_qp *rqp, u8 *gid)
3934 {
3935 struct res_gid *res;
3936
3937 list_for_each_entry(res, &rqp->mcg_list, list) {
3938 if (!memcmp(res->gid, gid, 16))
3939 return res;
3940 }
3941 return NULL;
3942 }
3943
3944 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3945 u8 *gid, enum mlx4_protocol prot,
3946 enum mlx4_steer_type steer, u64 reg_id)
3947 {
3948 struct res_gid *res;
3949 int err;
3950
3951 	res = kzalloc(sizeof(*res), GFP_KERNEL);
3952 if (!res)
3953 return -ENOMEM;
3954
3955 spin_lock_irq(&rqp->mcg_spl);
3956 if (find_gid(dev, slave, rqp, gid)) {
3957 kfree(res);
3958 err = -EEXIST;
3959 } else {
3960 memcpy(res->gid, gid, 16);
3961 res->prot = prot;
3962 res->steer = steer;
3963 res->reg_id = reg_id;
3964 list_add_tail(&res->list, &rqp->mcg_list);
3965 err = 0;
3966 }
3967 spin_unlock_irq(&rqp->mcg_spl);
3968
3969 return err;
3970 }
3971
3972 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3973 u8 *gid, enum mlx4_protocol prot,
3974 enum mlx4_steer_type steer, u64 *reg_id)
3975 {
3976 struct res_gid *res;
3977 int err;
3978
3979 spin_lock_irq(&rqp->mcg_spl);
3980 res = find_gid(dev, slave, rqp, gid);
3981 if (!res || res->prot != prot || res->steer != steer)
3982 err = -EINVAL;
3983 else {
3984 *reg_id = res->reg_id;
3985 list_del(&res->list);
3986 kfree(res);
3987 err = 0;
3988 }
3989 spin_unlock_irq(&rqp->mcg_spl);
3990
3991 return err;
3992 }
3993
3994 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3995 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3996 enum mlx4_steer_type type, u64 *reg_id)
3997 {
3998 switch (dev->caps.steering_mode) {
3999 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4000 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4001 if (port < 0)
4002 return port;
4003 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4004 block_loopback, prot,
4005 reg_id);
4006 }
4007 case MLX4_STEERING_MODE_B0:
4008 if (prot == MLX4_PROT_ETH) {
4009 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4010 if (port < 0)
4011 return port;
4012 gid[5] = port;
4013 }
4014 return mlx4_qp_attach_common(dev, qp, gid,
4015 block_loopback, prot, type);
4016 default:
4017 return -EINVAL;
4018 }
4019 }
4020
4021 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4022 u8 gid[16], enum mlx4_protocol prot,
4023 enum mlx4_steer_type type, u64 reg_id)
4024 {
4025 switch (dev->caps.steering_mode) {
4026 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4027 return mlx4_flow_detach(dev, reg_id);
4028 case MLX4_STEERING_MODE_B0:
4029 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4030 default:
4031 return -EINVAL;
4032 }
4033 }
4034
4035 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4036 u8 *gid, enum mlx4_protocol prot)
4037 {
4038 int real_port;
4039
4040 if (prot != MLX4_PROT_ETH)
4041 return 0;
4042
4043 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4044 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4045 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4046 if (real_port < 0)
4047 return -EINVAL;
4048 gid[5] = real_port;
4049 }
4050
4051 return 0;
4052 }
4053
4054 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4055 struct mlx4_vhcr *vhcr,
4056 struct mlx4_cmd_mailbox *inbox,
4057 struct mlx4_cmd_mailbox *outbox,
4058 struct mlx4_cmd_info *cmd)
4059 {
4060 struct mlx4_qp qp; /* dummy for calling attach/detach */
4061 u8 *gid = inbox->buf;
4062 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4063 int err;
4064 int qpn;
4065 struct res_qp *rqp;
4066 u64 reg_id = 0;
4067 int attach = vhcr->op_modifier;
4068 int block_loopback = vhcr->in_modifier >> 31;
4069 u8 steer_type_mask = 2;
4070 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
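	/* Bit 1 of gid[7] carries the mlx4_steer_type requested by the
	 * guest; bit 31 of the in_modifier is the block-loopback flag.
	 */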
4071
4072 qpn = vhcr->in_modifier & 0xffffff;
4073 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4074 if (err)
4075 return err;
4076
4077 qp.qpn = qpn;
4078 if (attach) {
4079 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4080 type, &reg_id);
4081 if (err) {
4082 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4083 goto ex_put;
4084 }
4085 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4086 if (err)
4087 goto ex_detach;
4088 } else {
4089 err = mlx4_adjust_port(dev, slave, gid, prot);
4090 if (err)
4091 goto ex_put;
4092
4093 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4094 if (err)
4095 goto ex_put;
4096
4097 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4098 if (err)
4099 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4100 qpn, reg_id);
4101 }
4102 put_res(dev, slave, qpn, RES_QP);
4103 return err;
4104
4105 ex_detach:
4106 qp_detach(dev, &qp, gid, prot, type, reg_id);
4107 ex_put:
4108 put_res(dev, slave, qpn, RES_QP);
4109 return err;
4110 }
4111
4112 /*
4113 * MAC validation for Flow Steering rules.
4114 * VF can attach rules only with a mac address which is assigned to it.
4115 */
4116 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4117 struct list_head *rlist)
4118 {
4119 struct mac_res *res, *tmp;
4120 __be64 be_mac;
4121
4122 	/* make sure it isn't a multicast or broadcast MAC */
4123 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4124 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4125 list_for_each_entry_safe(res, tmp, rlist, list) {
4126 be_mac = cpu_to_be64(res->mac << 16);
4127 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4128 return 0;
4129 }
4130 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4131 eth_header->eth.dst_mac, slave);
4132 return -EINVAL;
4133 }
4134 return 0;
4135 }
4136
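/*
 * For a rule whose only header is a multicast/broadcast Ethernet header,
 * force the priority to MLX4_DOMAIN_NIC, presumably so a VF's broad
 * multicast rule cannot outrank more specific rules installed by other
 * functions.
 */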
4137 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4138 struct _rule_hw *eth_header)
4139 {
4140 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4141 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4142 struct mlx4_net_trans_rule_hw_eth *eth =
4143 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4144 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4145 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4146 next_rule->rsvd == 0;
4147
4148 if (last_rule)
4149 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4150 }
4151 }
4152
4153 /*
4154 * In case of missing eth header, append eth header with a MAC address
4155 * assigned to the VF.
4156 */
4157 static int add_eth_header(struct mlx4_dev *dev, int slave,
4158 struct mlx4_cmd_mailbox *inbox,
4159 struct list_head *rlist, int header_id)
4160 {
4161 struct mac_res *res, *tmp;
4162 u8 port;
4163 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4164 struct mlx4_net_trans_rule_hw_eth *eth_header;
4165 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4166 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4167 __be64 be_mac = 0;
4168 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4169
4170 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4171 port = ctrl->port;
4172 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4173
4174 /* Make room in the inbox for the eth header */
4175 switch (header_id) {
4176 case MLX4_NET_TRANS_RULE_ID_IPV4:
4177 ip_header =
4178 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4179 memmove(ip_header, eth_header,
4180 sizeof(*ip_header) + sizeof(*l4_header));
4181 break;
4182 case MLX4_NET_TRANS_RULE_ID_TCP:
4183 case MLX4_NET_TRANS_RULE_ID_UDP:
4184 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4185 (eth_header + 1);
4186 memmove(l4_header, eth_header, sizeof(*l4_header));
4187 break;
4188 default:
4189 return -EINVAL;
4190 }
4191 list_for_each_entry_safe(res, tmp, rlist, list) {
4192 if (port == res->port) {
4193 be_mac = cpu_to_be64(res->mac << 16);
4194 break;
4195 }
4196 }
4197 if (!be_mac) {
4198 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4199 port);
4200 return -EINVAL;
4201 }
4202
4203 memset(eth_header, 0, sizeof(*eth_header));
4204 eth_header->size = sizeof(*eth_header) >> 2;
4205 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4206 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4207 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4208
4209 return 0;
4210
4211 }
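
/*
 * Note on the mailbox layout assumed by add_eth_header(): the buffer starts
 * with a mlx4_net_trans_rule_hw_ctrl header followed by the per-protocol
 * rule headers.  The function moves the existing IPv4/TCP/UDP headers up by
 * one eth header and fills the freed slot with an eth rule matching one of
 * the VF's registered MACs; sizes here, like the command size adjustment in
 * the caller, are counted in 4-byte units (hence the ">> 2" shifts).
 */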
4212
4213 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4214 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4215 struct mlx4_vhcr *vhcr,
4216 struct mlx4_cmd_mailbox *inbox,
4217 struct mlx4_cmd_mailbox *outbox,
4218 struct mlx4_cmd_info *cmd_info)
4219 {
4220 int err;
4221 u32 qpn = vhcr->in_modifier & 0xffffff;
4222 struct res_qp *rqp;
4223 u64 mac;
4224 unsigned port;
4225 u64 pri_addr_path_mask;
4226 struct mlx4_update_qp_context *cmd;
4227 int smac_index;
4228
4229 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4230
4231 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4232 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4233 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4234 return -EPERM;
4235
4236 /* Just change the smac for the QP */
4237 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4238 if (err) {
4239 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4240 return err;
4241 }
4242
4243 port = (rqp->sched_queue >> 6 & 1) + 1;
4244
4245 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4246 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4247 err = mac_find_smac_ix_in_slave(dev, slave, port,
4248 smac_index, &mac);
4249
4250 if (err) {
4251 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4252 qpn, smac_index);
4253 goto err_mac;
4254 }
4255 }
4256
4257 err = mlx4_cmd(dev, inbox->dma,
4258 vhcr->in_modifier, 0,
4259 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4260 MLX4_CMD_NATIVE);
4261 if (err) {
4262 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4263 goto err_mac;
4264 }
4265
4266 err_mac:
4267 put_res(dev, slave, qpn, RES_QP);
4268 return err;
4269 }
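
/*
 * Note on the UPDATE_QP wrapper above: a slave may only touch the MAC-index
 * bit of the primary address path; any qp_mask or secondary-path bits, or
 * path bits outside MLX4_UPD_QP_PATH_MASK_SUPPORTED, are rejected with
 * -EPERM.  The port is recovered from bit 6 of the saved sched_queue,
 *
 *	port = (rqp->sched_queue >> 6 & 1) + 1;
 *
 * so the requested smac_index can be checked against the MACs registered
 * for that slave and port before the command is forwarded to the firmware.
 */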
4270
4271 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4272 struct mlx4_vhcr *vhcr,
4273 struct mlx4_cmd_mailbox *inbox,
4274 struct mlx4_cmd_mailbox *outbox,
4275 struct mlx4_cmd_info *cmd)
4276 {
4277
4278 struct mlx4_priv *priv = mlx4_priv(dev);
4279 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4280 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4281 int err;
4282 int qpn;
4283 struct res_qp *rqp;
4284 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4285 struct _rule_hw *rule_header;
4286 int header_id;
4287
4288 if (dev->caps.steering_mode !=
4289 MLX4_STEERING_MODE_DEVICE_MANAGED)
4290 return -EOPNOTSUPP;
4291
4292 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4293 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4294 if (ctrl->port <= 0)
4295 return -EINVAL;
4296 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4297 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4298 if (err) {
4299 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4300 return err;
4301 }
4302 rule_header = (struct _rule_hw *)(ctrl + 1);
4303 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4304
4305 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4306 handle_eth_header_mcast_prio(ctrl, rule_header);
4307
4308 if (slave == dev->caps.function)
4309 goto execute;
4310
4311 switch (header_id) {
4312 case MLX4_NET_TRANS_RULE_ID_ETH:
4313 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4314 err = -EINVAL;
4315 goto err_put;
4316 }
4317 break;
4318 case MLX4_NET_TRANS_RULE_ID_IB:
4319 break;
4320 case MLX4_NET_TRANS_RULE_ID_IPV4:
4321 case MLX4_NET_TRANS_RULE_ID_TCP:
4322 case MLX4_NET_TRANS_RULE_ID_UDP:
4323 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4324 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4325 err = -EINVAL;
4326 goto err_put;
4327 }
4328 vhcr->in_modifier +=
4329 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4330 break;
4331 default:
4332 pr_err("Corrupted mailbox\n");
4333 err = -EINVAL;
4334 goto err_put;
4335 }
4336
4337 execute:
4338 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4339 vhcr->in_modifier, 0,
4340 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4341 MLX4_CMD_NATIVE);
4342 if (err)
4343 goto err_put;
4344
4345 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4346 if (err) {
4347 mlx4_err(dev, "Fail to add flow steering resources\n");
4348 /* detach rule */
4349 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4350 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4351 MLX4_CMD_NATIVE);
4352 goto err_put;
4353 }
4354 atomic_inc(&rqp->ref_count);
4355 err_put:
4356 put_res(dev, slave, qpn, RES_QP);
4357 return err;
4358 }
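
/*
 * Note on the flow-steering attach path above: the firmware command is
 * executed first, and only then is the returned rule id (vhcr->out_param)
 * recorded as a RES_FS_RULE owned by the slave, with the QP number stored
 * alongside it.  If the bookkeeping fails the rule is detached again so
 * hardware and tracker stay consistent; on success the QP's reference count
 * is bumped to record that a rule now points at it.
 */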
4359
4360 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4361 struct mlx4_vhcr *vhcr,
4362 struct mlx4_cmd_mailbox *inbox,
4363 struct mlx4_cmd_mailbox *outbox,
4364 struct mlx4_cmd_info *cmd)
4365 {
4366 int err;
4367 struct res_qp *rqp;
4368 struct res_fs_rule *rrule;
4369
4370 if (dev->caps.steering_mode !=
4371 MLX4_STEERING_MODE_DEVICE_MANAGED)
4372 return -EOPNOTSUPP;
4373
4374 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4375 if (err)
4376 return err;
4377 /* Release the rule from busy state before removal */
4378 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4379 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4380 if (err)
4381 return err;
4382
4383 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4384 if (err) {
4385 mlx4_err(dev, "Fail to remove flow steering resources\n");
4386 goto out;
4387 }
4388
4389 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4390 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4391 MLX4_CMD_NATIVE);
4392 if (!err)
4393 atomic_dec(&rqp->ref_count);
4394 out:
4395 put_res(dev, slave, rrule->qpn, RES_QP);
4396 return err;
4397 }
4398
4399 enum {
4400 BUSY_MAX_RETRIES = 10
4401 };
4402
4403 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4404 struct mlx4_vhcr *vhcr,
4405 struct mlx4_cmd_mailbox *inbox,
4406 struct mlx4_cmd_mailbox *outbox,
4407 struct mlx4_cmd_info *cmd)
4408 {
4409 int err;
4410 int index = vhcr->in_modifier & 0xffff;
4411
4412 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4413 if (err)
4414 return err;
4415
4416 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4417 put_res(dev, slave, index, RES_COUNTER);
4418 return err;
4419 }
4420
4421 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4422 {
4423 struct res_gid *rgid;
4424 struct res_gid *tmp;
4425 struct mlx4_qp qp; /* dummy for calling attach/detach */
4426
4427 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4428 switch (dev->caps.steering_mode) {
4429 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4430 mlx4_flow_detach(dev, rgid->reg_id);
4431 break;
4432 case MLX4_STEERING_MODE_B0:
4433 qp.qpn = rqp->local_qpn;
4434 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4435 rgid->prot, rgid->steer);
4436 break;
4437 }
4438 list_del(&rgid->list);
4439 kfree(rgid);
4440 }
4441 }
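
/*
 * Note: detach_qp() above drops every multicast attachment recorded for the
 * QP, using whichever mechanism matches the device's steering mode:
 * mlx4_flow_detach() with the saved reg_id for device-managed steering, or
 * mlx4_qp_detach_common() with the gid, protocol and steer type for B0
 * steering, and then frees the tracking entries.
 */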
4442
4443 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4444 enum mlx4_resource type, int print)
4445 {
4446 struct mlx4_priv *priv = mlx4_priv(dev);
4447 struct mlx4_resource_tracker *tracker =
4448 &priv->mfunc.master.res_tracker;
4449 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4450 struct res_common *r;
4451 struct res_common *tmp;
4452 int busy;
4453
4454 busy = 0;
4455 spin_lock_irq(mlx4_tlock(dev));
4456 list_for_each_entry_safe(r, tmp, rlist, list) {
4457 if (r->owner == slave) {
4458 if (!r->removing) {
4459 if (r->state == RES_ANY_BUSY) {
4460 if (print)
4461 mlx4_dbg(dev,
4462 "%s id 0x%llx is busy\n",
4463 resource_str(type),
4464 r->res_id);
4465 ++busy;
4466 } else {
4467 r->from_state = r->state;
4468 r->state = RES_ANY_BUSY;
4469 r->removing = 1;
4470 }
4471 }
4472 }
4473 }
4474 spin_unlock_irq(mlx4_tlock(dev));
4475
4476 return busy;
4477 }
4478
4479 static int move_all_busy(struct mlx4_dev *dev, int slave,
4480 enum mlx4_resource type)
4481 {
4482 unsigned long begin;
4483 int busy;
4484
4485 begin = jiffies;
4486 do {
4487 busy = _move_all_busy(dev, slave, type, 0);
4488 if (time_after(jiffies, begin + 5 * HZ))
4489 break;
4490 if (busy)
4491 cond_resched();
4492 } while (busy);
4493
4494 if (busy)
4495 busy = _move_all_busy(dev, slave, type, 1);
4496
4497 return busy;
4498 }
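
/*
 * Note on the helpers above: _move_all_busy() walks the slave's resource
 * list once, marks every idle entry busy and "removing", and returns how
 * many entries were already busy and could not be claimed.  move_all_busy()
 * retries that sweep, yielding with cond_resched(), until nothing is left
 * busy or about five seconds (5 * HZ jiffies) have passed, after which a
 * final pass logs the resources that are still stuck.
 */
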
4499 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4500 {
4501 struct mlx4_priv *priv = mlx4_priv(dev);
4502 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4503 struct list_head *qp_list =
4504 &tracker->slave_list[slave].res_list[RES_QP];
4505 struct res_qp *qp;
4506 struct res_qp *tmp;
4507 int state;
4508 u64 in_param;
4509 int qpn;
4510 int err;
4511
4512 err = move_all_busy(dev, slave, RES_QP);
4513 if (err)
4514 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4515 slave);
4516
4517 spin_lock_irq(mlx4_tlock(dev));
4518 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4519 spin_unlock_irq(mlx4_tlock(dev));
4520 if (qp->com.owner == slave) {
4521 qpn = qp->com.res_id;
4522 detach_qp(dev, slave, qp);
4523 state = qp->com.from_state;
4524 while (state != 0) {
4525 switch (state) {
4526 case RES_QP_RESERVED:
4527 spin_lock_irq(mlx4_tlock(dev));
4528 rb_erase(&qp->com.node,
4529 &tracker->res_tree[RES_QP]);
4530 list_del(&qp->com.list);
4531 spin_unlock_irq(mlx4_tlock(dev));
4532 if (!valid_reserved(dev, slave, qpn)) {
4533 __mlx4_qp_release_range(dev, qpn, 1);
4534 mlx4_release_resource(dev, slave,
4535 RES_QP, 1, 0);
4536 }
4537 kfree(qp);
4538 state = 0;
4539 break;
4540 case RES_QP_MAPPED:
4541 if (!valid_reserved(dev, slave, qpn))
4542 __mlx4_qp_free_icm(dev, qpn);
4543 state = RES_QP_RESERVED;
4544 break;
4545 case RES_QP_HW:
4546 in_param = slave;
4547 err = mlx4_cmd(dev, in_param,
4548 qp->local_qpn, 2,
4549 MLX4_CMD_2RST_QP,
4550 MLX4_CMD_TIME_CLASS_A,
4551 MLX4_CMD_NATIVE);
4552 if (err)
4553 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4554 slave, qp->local_qpn);
4555 atomic_dec(&qp->rcq->ref_count);
4556 atomic_dec(&qp->scq->ref_count);
4557 atomic_dec(&qp->mtt->ref_count);
4558 if (qp->srq)
4559 atomic_dec(&qp->srq->ref_count);
4560 state = RES_QP_MAPPED;
4561 break;
4562 default:
4563 state = 0;
4564 }
4565 }
4566 }
4567 spin_lock_irq(mlx4_tlock(dev));
4568 }
4569 spin_unlock_irq(mlx4_tlock(dev));
4570 }
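
/*
 * Note on the teardown loop above: each QP is unwound through its creation
 * states in reverse.  Roughly:
 *
 *	RES_QP_HW       -> 2RST_QP command, drop the CQ/SRQ/MTT references
 *	RES_QP_MAPPED   -> free the ICM backing the QP context
 *	RES_QP_RESERVED -> release the QP number range, remove the tracker
 *	                   entry and free the res_qp
 *
 * The rem_slave_*() helpers below use the same walk-back pattern for SRQs,
 * CQs, MPTs, MTTs, EQs and flow-steering rules.
 */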
4571
4572 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4573 {
4574 struct mlx4_priv *priv = mlx4_priv(dev);
4575 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4576 struct list_head *srq_list =
4577 &tracker->slave_list[slave].res_list[RES_SRQ];
4578 struct res_srq *srq;
4579 struct res_srq *tmp;
4580 int state;
4581 u64 in_param;
4582 LIST_HEAD(tlist);
4583 int srqn;
4584 int err;
4585
4586 err = move_all_busy(dev, slave, RES_SRQ);
4587 if (err)
4588 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4589 slave);
4590
4591 spin_lock_irq(mlx4_tlock(dev));
4592 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4593 spin_unlock_irq(mlx4_tlock(dev));
4594 if (srq->com.owner == slave) {
4595 srqn = srq->com.res_id;
4596 state = srq->com.from_state;
4597 while (state != 0) {
4598 switch (state) {
4599 case RES_SRQ_ALLOCATED:
4600 __mlx4_srq_free_icm(dev, srqn);
4601 spin_lock_irq(mlx4_tlock(dev));
4602 rb_erase(&srq->com.node,
4603 &tracker->res_tree[RES_SRQ]);
4604 list_del(&srq->com.list);
4605 spin_unlock_irq(mlx4_tlock(dev));
4606 mlx4_release_resource(dev, slave,
4607 RES_SRQ, 1, 0);
4608 kfree(srq);
4609 state = 0;
4610 break;
4611
4612 case RES_SRQ_HW:
4613 in_param = slave;
4614 err = mlx4_cmd(dev, in_param, srqn, 1,
4615 MLX4_CMD_HW2SW_SRQ,
4616 MLX4_CMD_TIME_CLASS_A,
4617 MLX4_CMD_NATIVE);
4618 if (err)
4619 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4620 slave, srqn);
4621
4622 atomic_dec(&srq->mtt->ref_count);
4623 if (srq->cq)
4624 atomic_dec(&srq->cq->ref_count);
4625 state = RES_SRQ_ALLOCATED;
4626 break;
4627
4628 default:
4629 state = 0;
4630 }
4631 }
4632 }
4633 spin_lock_irq(mlx4_tlock(dev));
4634 }
4635 spin_unlock_irq(mlx4_tlock(dev));
4636 }
4637
4638 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4639 {
4640 struct mlx4_priv *priv = mlx4_priv(dev);
4641 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4642 struct list_head *cq_list =
4643 &tracker->slave_list[slave].res_list[RES_CQ];
4644 struct res_cq *cq;
4645 struct res_cq *tmp;
4646 int state;
4647 u64 in_param;
4648 LIST_HEAD(tlist);
4649 int cqn;
4650 int err;
4651
4652 err = move_all_busy(dev, slave, RES_CQ);
4653 if (err)
4654 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4655 slave);
4656
4657 spin_lock_irq(mlx4_tlock(dev));
4658 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4659 spin_unlock_irq(mlx4_tlock(dev));
4660 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4661 cqn = cq->com.res_id;
4662 state = cq->com.from_state;
4663 while (state != 0) {
4664 switch (state) {
4665 case RES_CQ_ALLOCATED:
4666 __mlx4_cq_free_icm(dev, cqn);
4667 spin_lock_irq(mlx4_tlock(dev));
4668 rb_erase(&cq->com.node,
4669 &tracker->res_tree[RES_CQ]);
4670 list_del(&cq->com.list);
4671 spin_unlock_irq(mlx4_tlock(dev));
4672 mlx4_release_resource(dev, slave,
4673 RES_CQ, 1, 0);
4674 kfree(cq);
4675 state = 0;
4676 break;
4677
4678 case RES_CQ_HW:
4679 in_param = slave;
4680 err = mlx4_cmd(dev, in_param, cqn, 1,
4681 MLX4_CMD_HW2SW_CQ,
4682 MLX4_CMD_TIME_CLASS_A,
4683 MLX4_CMD_NATIVE);
4684 if (err)
4685 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4686 slave, cqn);
4687 atomic_dec(&cq->mtt->ref_count);
4688 state = RES_CQ_ALLOCATED;
4689 break;
4690
4691 default:
4692 state = 0;
4693 }
4694 }
4695 }
4696 spin_lock_irq(mlx4_tlock(dev));
4697 }
4698 spin_unlock_irq(mlx4_tlock(dev));
4699 }
4700
4701 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4702 {
4703 struct mlx4_priv *priv = mlx4_priv(dev);
4704 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4705 struct list_head *mpt_list =
4706 &tracker->slave_list[slave].res_list[RES_MPT];
4707 struct res_mpt *mpt;
4708 struct res_mpt *tmp;
4709 int state;
4710 u64 in_param;
4711 LIST_HEAD(tlist);
4712 int mptn;
4713 int err;
4714
4715 err = move_all_busy(dev, slave, RES_MPT);
4716 if (err)
4717 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4718 slave);
4719
4720 spin_lock_irq(mlx4_tlock(dev));
4721 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4722 spin_unlock_irq(mlx4_tlock(dev));
4723 if (mpt->com.owner == slave) {
4724 mptn = mpt->com.res_id;
4725 state = mpt->com.from_state;
4726 while (state != 0) {
4727 switch (state) {
4728 case RES_MPT_RESERVED:
4729 __mlx4_mpt_release(dev, mpt->key);
4730 spin_lock_irq(mlx4_tlock(dev));
4731 rb_erase(&mpt->com.node,
4732 &tracker->res_tree[RES_MPT]);
4733 list_del(&mpt->com.list);
4734 spin_unlock_irq(mlx4_tlock(dev));
4735 mlx4_release_resource(dev, slave,
4736 RES_MPT, 1, 0);
4737 kfree(mpt);
4738 state = 0;
4739 break;
4740
4741 case RES_MPT_MAPPED:
4742 __mlx4_mpt_free_icm(dev, mpt->key);
4743 state = RES_MPT_RESERVED;
4744 break;
4745
4746 case RES_MPT_HW:
4747 in_param = slave;
4748 err = mlx4_cmd(dev, in_param, mptn, 0,
4749 MLX4_CMD_HW2SW_MPT,
4750 MLX4_CMD_TIME_CLASS_A,
4751 MLX4_CMD_NATIVE);
4752 if (err)
4753 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4754 slave, mptn);
4755 if (mpt->mtt)
4756 atomic_dec(&mpt->mtt->ref_count);
4757 state = RES_MPT_MAPPED;
4758 break;
4759 default:
4760 state = 0;
4761 }
4762 }
4763 }
4764 spin_lock_irq(mlx4_tlock(dev));
4765 }
4766 spin_unlock_irq(mlx4_tlock(dev));
4767 }
4768
4769 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4770 {
4771 struct mlx4_priv *priv = mlx4_priv(dev);
4772 struct mlx4_resource_tracker *tracker =
4773 &priv->mfunc.master.res_tracker;
4774 struct list_head *mtt_list =
4775 &tracker->slave_list[slave].res_list[RES_MTT];
4776 struct res_mtt *mtt;
4777 struct res_mtt *tmp;
4778 int state;
4779 LIST_HEAD(tlist);
4780 int base;
4781 int err;
4782
4783 err = move_all_busy(dev, slave, RES_MTT);
4784 if (err)
4785 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4786 slave);
4787
4788 spin_lock_irq(mlx4_tlock(dev));
4789 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4790 spin_unlock_irq(mlx4_tlock(dev));
4791 if (mtt->com.owner == slave) {
4792 base = mtt->com.res_id;
4793 state = mtt->com.from_state;
4794 while (state != 0) {
4795 switch (state) {
4796 case RES_MTT_ALLOCATED:
4797 __mlx4_free_mtt_range(dev, base,
4798 mtt->order);
4799 spin_lock_irq(mlx4_tlock(dev));
4800 rb_erase(&mtt->com.node,
4801 &tracker->res_tree[RES_MTT]);
4802 list_del(&mtt->com.list);
4803 spin_unlock_irq(mlx4_tlock(dev));
4804 mlx4_release_resource(dev, slave, RES_MTT,
4805 1 << mtt->order, 0);
4806 kfree(mtt);
4807 state = 0;
4808 break;
4809
4810 default:
4811 state = 0;
4812 }
4813 }
4814 }
4815 spin_lock_irq(mlx4_tlock(dev));
4816 }
4817 spin_unlock_irq(mlx4_tlock(dev));
4818 }
4819
4820 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4821 {
4822 struct mlx4_priv *priv = mlx4_priv(dev);
4823 struct mlx4_resource_tracker *tracker =
4824 &priv->mfunc.master.res_tracker;
4825 struct list_head *fs_rule_list =
4826 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4827 struct res_fs_rule *fs_rule;
4828 struct res_fs_rule *tmp;
4829 int state;
4830 u64 base;
4831 int err;
4832
4833 err = move_all_busy(dev, slave, RES_FS_RULE);
4834 if (err)
4835 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
4836 slave);
4837
4838 spin_lock_irq(mlx4_tlock(dev));
4839 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4840 spin_unlock_irq(mlx4_tlock(dev));
4841 if (fs_rule->com.owner == slave) {
4842 base = fs_rule->com.res_id;
4843 state = fs_rule->com.from_state;
4844 while (state != 0) {
4845 switch (state) {
4846 case RES_FS_RULE_ALLOCATED:
4847 /* detach rule */
4848 err = mlx4_cmd(dev, base, 0, 0,
4849 MLX4_QP_FLOW_STEERING_DETACH,
4850 MLX4_CMD_TIME_CLASS_A,
4851 MLX4_CMD_NATIVE);
4852
4853 spin_lock_irq(mlx4_tlock(dev));
4854 rb_erase(&fs_rule->com.node,
4855 &tracker->res_tree[RES_FS_RULE]);
4856 list_del(&fs_rule->com.list);
4857 spin_unlock_irq(mlx4_tlock(dev));
4858 kfree(fs_rule);
4859 state = 0;
4860 break;
4861
4862 default:
4863 state = 0;
4864 }
4865 }
4866 }
4867 spin_lock_irq(mlx4_tlock(dev));
4868 }
4869 spin_unlock_irq(mlx4_tlock(dev));
4870 }
4871
4872 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4873 {
4874 struct mlx4_priv *priv = mlx4_priv(dev);
4875 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4876 struct list_head *eq_list =
4877 &tracker->slave_list[slave].res_list[RES_EQ];
4878 struct res_eq *eq;
4879 struct res_eq *tmp;
4880 int err;
4881 int state;
4882 LIST_HEAD(tlist);
4883 int eqn;
4884
4885 err = move_all_busy(dev, slave, RES_EQ);
4886 if (err)
4887 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4888 slave);
4889
4890 spin_lock_irq(mlx4_tlock(dev));
4891 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4892 spin_unlock_irq(mlx4_tlock(dev));
4893 if (eq->com.owner == slave) {
4894 eqn = eq->com.res_id;
4895 state = eq->com.from_state;
4896 while (state != 0) {
4897 switch (state) {
4898 case RES_EQ_RESERVED:
4899 spin_lock_irq(mlx4_tlock(dev));
4900 rb_erase(&eq->com.node,
4901 &tracker->res_tree[RES_EQ]);
4902 list_del(&eq->com.list);
4903 spin_unlock_irq(mlx4_tlock(dev));
4904 kfree(eq);
4905 state = 0;
4906 break;
4907
4908 case RES_EQ_HW:
4909 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4910 1, MLX4_CMD_HW2SW_EQ,
4911 MLX4_CMD_TIME_CLASS_A,
4912 MLX4_CMD_NATIVE);
4913 if (err)
4914 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4915 slave, eqn & 0x3ff);
4916 atomic_dec(&eq->mtt->ref_count);
4917 state = RES_EQ_RESERVED;
4918 break;
4919
4920 default:
4921 state = 0;
4922 }
4923 }
4924 }
4925 spin_lock_irq(mlx4_tlock(dev));
4926 }
4927 spin_unlock_irq(mlx4_tlock(dev));
4928 }
4929
4930 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4931 {
4932 struct mlx4_priv *priv = mlx4_priv(dev);
4933 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4934 struct list_head *counter_list =
4935 &tracker->slave_list[slave].res_list[RES_COUNTER];
4936 struct res_counter *counter;
4937 struct res_counter *tmp;
4938 int err;
4939 int index;
4940
4941 err = move_all_busy(dev, slave, RES_COUNTER);
4942 if (err)
4943 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4944 slave);
4945
4946 spin_lock_irq(mlx4_tlock(dev));
4947 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4948 if (counter->com.owner == slave) {
4949 index = counter->com.res_id;
4950 rb_erase(&counter->com.node,
4951 &tracker->res_tree[RES_COUNTER]);
4952 list_del(&counter->com.list);
4953 kfree(counter);
4954 __mlx4_counter_free(dev, index);
4955 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4956 }
4957 }
4958 spin_unlock_irq(mlx4_tlock(dev));
4959 }
4960
4961 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4962 {
4963 struct mlx4_priv *priv = mlx4_priv(dev);
4964 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4965 struct list_head *xrcdn_list =
4966 &tracker->slave_list[slave].res_list[RES_XRCD];
4967 struct res_xrcdn *xrcd;
4968 struct res_xrcdn *tmp;
4969 int err;
4970 int xrcdn;
4971
4972 err = move_all_busy(dev, slave, RES_XRCD);
4973 if (err)
4974 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4975 slave);
4976
4977 spin_lock_irq(mlx4_tlock(dev));
4978 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4979 if (xrcd->com.owner == slave) {
4980 xrcdn = xrcd->com.res_id;
4981 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4982 list_del(&xrcd->com.list);
4983 kfree(xrcd);
4984 __mlx4_xrcd_free(dev, xrcdn);
4985 }
4986 }
4987 spin_unlock_irq(mlx4_tlock(dev));
4988 }
4989
4990 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4991 {
4992 struct mlx4_priv *priv = mlx4_priv(dev);
4993 mlx4_reset_roce_gids(dev, slave);
4994 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4995 rem_slave_vlans(dev, slave);
4996 rem_slave_macs(dev, slave);
4997 rem_slave_fs_rule(dev, slave);
4998 rem_slave_qps(dev, slave);
4999 rem_slave_srqs(dev, slave);
5000 rem_slave_cqs(dev, slave);
5001 rem_slave_mrs(dev, slave);
5002 rem_slave_eqs(dev, slave);
5003 rem_slave_mtts(dev, slave);
5004 rem_slave_counters(dev, slave);
5005 rem_slave_xrcdns(dev, slave);
5006 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5007 }
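
/*
 * Note on the ordering above: resources that reference other resources are
 * released first (flow rules and QPs before SRQs and CQs, and everything
 * that pins MTT entries before the MTTs themselves), all while the slave's
 * resource-tracker mutex is held.
 */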
5008
5009 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5010 {
5011 struct mlx4_vf_immed_vlan_work *work =
5012 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5013 struct mlx4_cmd_mailbox *mailbox;
5014 struct mlx4_update_qp_context *upd_context;
5015 struct mlx4_dev *dev = &work->priv->dev;
5016 struct mlx4_resource_tracker *tracker =
5017 &work->priv->mfunc.master.res_tracker;
5018 struct list_head *qp_list =
5019 &tracker->slave_list[work->slave].res_list[RES_QP];
5020 struct res_qp *qp;
5021 struct res_qp *tmp;
5022 u64 qp_path_mask_vlan_ctrl =
5023 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5024 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5025 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5026 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5027 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5028 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5029
5030 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5031 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5032 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5033 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5034 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5035 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5036 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5037
5038 int err;
5039 int port, errors = 0;
5040 u8 vlan_control;
5041
5042 if (mlx4_is_slave(dev)) {
5043 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5044 work->slave);
5045 goto out;
5046 }
5047
5048 mailbox = mlx4_alloc_cmd_mailbox(dev);
5049 if (IS_ERR(mailbox))
5050 goto out;
5051 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5052 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5053 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5054 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5055 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5056 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5057 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5058 else if (!work->vlan_id)
5059 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5060 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5061 else
5062 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5063 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5064 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5065
5066 upd_context = mailbox->buf;
5067 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5068
5069 spin_lock_irq(mlx4_tlock(dev));
5070 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5071 spin_unlock_irq(mlx4_tlock(dev));
5072 if (qp->com.owner == work->slave) {
5073 if (qp->com.from_state != RES_QP_HW ||
5074 !qp->sched_queue || /* no INIT2RTR trans yet */
5075 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5076 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5077 spin_lock_irq(mlx4_tlock(dev));
5078 continue;
5079 }
5080 port = (qp->sched_queue >> 6 & 1) + 1;
5081 if (port != work->port) {
5082 spin_lock_irq(mlx4_tlock(dev));
5083 continue;
5084 }
5085 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5086 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5087 else
5088 upd_context->primary_addr_path_mask =
5089 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5090 if (work->vlan_id == MLX4_VGT) {
5091 upd_context->qp_context.param3 = qp->param3;
5092 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5093 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5094 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5095 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5096 upd_context->qp_context.pri_path.feup = qp->feup;
5097 upd_context->qp_context.pri_path.sched_queue =
5098 qp->sched_queue;
5099 } else {
5100 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5101 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5102 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5103 upd_context->qp_context.pri_path.fvl_rx =
5104 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5105 upd_context->qp_context.pri_path.fl =
5106 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
5107 upd_context->qp_context.pri_path.feup =
5108 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5109 upd_context->qp_context.pri_path.sched_queue =
5110 qp->sched_queue & 0xC7;
5111 upd_context->qp_context.pri_path.sched_queue |=
5112 ((work->qos & 0x7) << 3);
5113 upd_context->qp_mask |=
5114 cpu_to_be64(1ULL <<
5115 MLX4_UPD_QP_MASK_QOS_VPP);
5116 upd_context->qp_context.qos_vport =
5117 work->qos_vport;
5118 }
5119
5120 err = mlx4_cmd(dev, mailbox->dma,
5121 qp->local_qpn & 0xffffff,
5122 0, MLX4_CMD_UPDATE_QP,
5123 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5124 if (err) {
5125 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5126 work->slave, port, qp->local_qpn, err);
5127 errors++;
5128 }
5129 }
5130 spin_lock_irq(mlx4_tlock(dev));
5131 }
5132 spin_unlock_irq(mlx4_tlock(dev));
5133 mlx4_free_cmd_mailbox(dev, mailbox);
5134
5135 if (errors)
5136 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5137 errors, work->slave, work->port);
5138
5139 /* unregister previous vlan_id if needed and we had no errors
5140 * while updating the QPs
5141 */
5142 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5143 NO_INDX != work->orig_vlan_ix)
5144 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5145 work->orig_vlan_id);
5146 out:
5147 kfree(work);
5148 return;
5149 }
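
/*
 * Note on the work handler above: it runs on the master only and walks the
 * slave's QPs, skipping any QP that is not in hardware ownership, has not
 * yet gone through INIT2RTR, is a reserved QP, or is an RSS QP.  For the
 * remaining QPs on the requested port it issues UPDATE_QP: when switching
 * back to VGT (work->vlan_id == MLX4_VGT) it restores the QP parameters
 * saved before VST enforcement, otherwise it forces the new vlan index,
 * vlan control policy and QoS settings onto the QP.  The previously
 * registered vlan is unregistered only if every UPDATE_QP succeeded and a
 * valid original vlan index exists.
 */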