git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git / drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
mlx4_core: Add proxy and tunnel QPs to the reserved QP area
1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
50 #define MLX4_MAC_VALID (1ull << 63)
51
52 struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56 };
57
58 struct res_common {
59 struct list_head list;
60 struct rb_node node;
61 u64 res_id;
62 int owner;
63 int state;
64 int from_state;
65 int to_state;
66 int removing;
67 };
68
69 enum {
70 RES_ANY_BUSY = 1
71 };
72
73 struct res_gid {
74 struct list_head list;
75 u8 gid[16];
76 enum mlx4_protocol prot;
77 enum mlx4_steer_type steer;
78 };
79
80 enum res_qp_states {
81 RES_QP_BUSY = RES_ANY_BUSY,
82
83 /* QP number was allocated */
84 RES_QP_RESERVED,
85
86 /* ICM memory for QP context was mapped */
87 RES_QP_MAPPED,
88
89 /* QP is in hw ownership */
90 RES_QP_HW
91 };
92
93 struct res_qp {
94 struct res_common com;
95 struct res_mtt *mtt;
96 struct res_cq *rcq;
97 struct res_cq *scq;
98 struct res_srq *srq;
99 struct list_head mcg_list;
100 spinlock_t mcg_spl;
101 int local_qpn;
102 };
103
104 enum res_mtt_states {
105 RES_MTT_BUSY = RES_ANY_BUSY,
106 RES_MTT_ALLOCATED,
107 };
108
109 static inline const char *mtt_states_str(enum res_mtt_states state)
110 {
111 switch (state) {
112 case RES_MTT_BUSY: return "RES_MTT_BUSY";
113 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
114 default: return "Unknown";
115 }
116 }
117
118 struct res_mtt {
119 struct res_common com;
120 int order;
121 atomic_t ref_count;
122 };
123
124 enum res_mpt_states {
125 RES_MPT_BUSY = RES_ANY_BUSY,
126 RES_MPT_RESERVED,
127 RES_MPT_MAPPED,
128 RES_MPT_HW,
129 };
130
131 struct res_mpt {
132 struct res_common com;
133 struct res_mtt *mtt;
134 int key;
135 };
136
137 enum res_eq_states {
138 RES_EQ_BUSY = RES_ANY_BUSY,
139 RES_EQ_RESERVED,
140 RES_EQ_HW,
141 };
142
143 struct res_eq {
144 struct res_common com;
145 struct res_mtt *mtt;
146 };
147
148 enum res_cq_states {
149 RES_CQ_BUSY = RES_ANY_BUSY,
150 RES_CQ_ALLOCATED,
151 RES_CQ_HW,
152 };
153
154 struct res_cq {
155 struct res_common com;
156 struct res_mtt *mtt;
157 atomic_t ref_count;
158 };
159
160 enum res_srq_states {
161 RES_SRQ_BUSY = RES_ANY_BUSY,
162 RES_SRQ_ALLOCATED,
163 RES_SRQ_HW,
164 };
165
166 struct res_srq {
167 struct res_common com;
168 struct res_mtt *mtt;
169 struct res_cq *cq;
170 atomic_t ref_count;
171 };
172
173 enum res_counter_states {
174 RES_COUNTER_BUSY = RES_ANY_BUSY,
175 RES_COUNTER_ALLOCATED,
176 };
177
178 struct res_counter {
179 struct res_common com;
180 int port;
181 };
182
183 enum res_xrcdn_states {
184 RES_XRCD_BUSY = RES_ANY_BUSY,
185 RES_XRCD_ALLOCATED,
186 };
187
188 struct res_xrcdn {
189 struct res_common com;
190 int port;
191 };
192
193 enum res_fs_rule_states {
194 RES_FS_RULE_BUSY = RES_ANY_BUSY,
195 RES_FS_RULE_ALLOCATED,
196 };
197
198 struct res_fs_rule {
199 struct res_common com;
200 };
201
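/*
 * Look up a tracked resource by id in the per-type red-black tree.
 * Returns the embedded res_common (callers cast to the concrete type)
 * or NULL if the id is not tracked.  Called with mlx4_tlock() held.
 */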
202 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
203 {
204 struct rb_node *node = root->rb_node;
205
206 while (node) {
207 struct res_common *res = container_of(node, struct res_common,
208 node);
209
210 if (res_id < res->res_id)
211 node = node->rb_left;
212 else if (res_id > res->res_id)
213 node = node->rb_right;
214 else
215 return res;
216 }
217 return NULL;
218 }
219
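/*
 * Insert a tracked resource into the per-type red-black tree, keyed by
 * res_id.  Returns -EEXIST if an entry with that id already exists.
 */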
220 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
221 {
222 struct rb_node **new = &(root->rb_node), *parent = NULL;
223
224 /* Figure out where to put new node */
225 while (*new) {
226 struct res_common *this = container_of(*new, struct res_common,
227 node);
228
229 parent = *new;
230 if (res->res_id < this->res_id)
231 new = &((*new)->rb_left);
232 else if (res->res_id > this->res_id)
233 new = &((*new)->rb_right);
234 else
235 return -EEXIST;
236 }
237
238 /* Add new node and rebalance tree. */
239 rb_link_node(&res->node, parent, new);
240 rb_insert_color(&res->node, root);
241
242 return 0;
243 }
244
245 /* For debug use */
246 static const char *ResourceType(enum mlx4_resource rt)
247 {
248 switch (rt) {
249 case RES_QP: return "RES_QP";
250 case RES_CQ: return "RES_CQ";
251 case RES_SRQ: return "RES_SRQ";
252 case RES_MPT: return "RES_MPT";
253 case RES_MTT: return "RES_MTT";
254 case RES_MAC: return "RES_MAC";
255 case RES_EQ: return "RES_EQ";
256 case RES_COUNTER: return "RES_COUNTER";
257 case RES_FS_RULE: return "RES_FS_RULE";
258 case RES_XRCD: return "RES_XRCD";
259 default: return "Unknown resource type !!!";
260 	}
261 }
262
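/*
 * Set up the master's resource tracker: allocate the per-slave resource
 * lists and initialize the per-type red-black trees and the tracker lock.
 */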
263 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
264 {
265 struct mlx4_priv *priv = mlx4_priv(dev);
266 int i;
267 int t;
268
269 priv->mfunc.master.res_tracker.slave_list =
270 kzalloc(dev->num_slaves * sizeof(struct slave_list),
271 GFP_KERNEL);
272 if (!priv->mfunc.master.res_tracker.slave_list)
273 return -ENOMEM;
274
275 for (i = 0 ; i < dev->num_slaves; i++) {
276 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
277 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
278 slave_list[i].res_list[t]);
279 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
280 }
281
282 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
283 dev->num_slaves);
284 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
285 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
286
287 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
288 	return 0;
289 }
290
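/*
 * Tear down the resource tracker.  Depending on 'type', free the resources
 * still held by the slaves, the tracker structures themselves, or both.
 */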
291 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
292 enum mlx4_res_tracker_free_type type)
293 {
294 struct mlx4_priv *priv = mlx4_priv(dev);
295 int i;
296
297 if (priv->mfunc.master.res_tracker.slave_list) {
298 if (type != RES_TR_FREE_STRUCTS_ONLY)
299 for (i = 0 ; i < dev->num_slaves; i++)
300 if (type == RES_TR_FREE_ALL ||
301 dev->caps.function != i)
302 mlx4_delete_all_resources_for_slave(dev, i);
303
304 if (type != RES_TR_FREE_SLAVES_ONLY) {
305 kfree(priv->mfunc.master.res_tracker.slave_list);
306 priv->mfunc.master.res_tracker.slave_list = NULL;
307 }
308 }
309 }
310
311 static void update_ud_gid(struct mlx4_dev *dev,
312 struct mlx4_qp_context *qp_ctx, u8 slave)
313 {
314 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
315
316 if (MLX4_QP_ST_UD == ts)
317 qp_ctx->pri_path.mgid_index = 0x80 | slave;
318
319 	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
320 		slave, qp_ctx->pri_path.mgid_index);
321 }
322
323 static int mpt_mask(struct mlx4_dev *dev)
324 {
325 return dev->caps.num_mpts - 1;
326 }
327
328 static void *find_res(struct mlx4_dev *dev, int res_id,
329 enum mlx4_resource type)
330 {
331 struct mlx4_priv *priv = mlx4_priv(dev);
332
333 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
334 res_id);
335 }
336
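/*
 * Take a tracked resource for exclusive use: verify that it exists, is not
 * already busy and is owned by 'slave', then mark it busy.  The previous
 * state is saved so that put_res() can restore it.
 */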
337 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
338 enum mlx4_resource type,
339 void *res)
340 {
341 struct res_common *r;
342 int err = 0;
343
344 spin_lock_irq(mlx4_tlock(dev));
345 r = find_res(dev, res_id, type);
346 if (!r) {
347 		err = -ENOENT;
348 goto exit;
349 }
350
351 if (r->state == RES_ANY_BUSY) {
352 err = -EBUSY;
353 goto exit;
354 }
355
356 if (r->owner != slave) {
357 err = -EPERM;
358 goto exit;
359 }
360
361 r->from_state = r->state;
362 r->state = RES_ANY_BUSY;
363 mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
364 ResourceType(type), r->res_id);
365
366 if (res)
367 *((struct res_common **)res) = r;
368
369 exit:
370 spin_unlock_irq(mlx4_tlock(dev));
371 return err;
372 }
373
374 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
375 enum mlx4_resource type,
376 u64 res_id, int *slave)
377 {
378
379 struct res_common *r;
380 int err = -ENOENT;
381 int id = res_id;
382
383 if (type == RES_QP)
384 id &= 0x7fffff;
385 spin_lock(mlx4_tlock(dev));
386
387 r = find_res(dev, id, type);
388 if (r) {
389 *slave = r->owner;
390 err = 0;
391 }
392 spin_unlock(mlx4_tlock(dev));
393
394 return err;
395 }
396
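/*
 * Release a resource taken with get_res(): restore the state that was
 * saved when the resource was marked busy.
 */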
397 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
398 enum mlx4_resource type)
399 {
400 struct res_common *r;
401
402 spin_lock_irq(mlx4_tlock(dev));
403 r = find_res(dev, res_id, type);
404 if (r)
405 r->state = r->from_state;
406 spin_unlock_irq(mlx4_tlock(dev));
407 }
408
409 static struct res_common *alloc_qp_tr(int id)
410 {
411 struct res_qp *ret;
412
413 ret = kzalloc(sizeof *ret, GFP_KERNEL);
414 if (!ret)
415 return NULL;
416
417 ret->com.res_id = id;
418 ret->com.state = RES_QP_RESERVED;
419 ret->local_qpn = id;
420 INIT_LIST_HEAD(&ret->mcg_list);
421 spin_lock_init(&ret->mcg_spl);
422
423 return &ret->com;
424 }
425
426 static struct res_common *alloc_mtt_tr(int id, int order)
427 {
428 struct res_mtt *ret;
429
430 ret = kzalloc(sizeof *ret, GFP_KERNEL);
431 if (!ret)
432 return NULL;
433
434 ret->com.res_id = id;
435 ret->order = order;
436 ret->com.state = RES_MTT_ALLOCATED;
437 atomic_set(&ret->ref_count, 0);
438
439 return &ret->com;
440 }
441
442 static struct res_common *alloc_mpt_tr(int id, int key)
443 {
444 struct res_mpt *ret;
445
446 ret = kzalloc(sizeof *ret, GFP_KERNEL);
447 if (!ret)
448 return NULL;
449
450 ret->com.res_id = id;
451 ret->com.state = RES_MPT_RESERVED;
452 ret->key = key;
453
454 return &ret->com;
455 }
456
457 static struct res_common *alloc_eq_tr(int id)
458 {
459 struct res_eq *ret;
460
461 ret = kzalloc(sizeof *ret, GFP_KERNEL);
462 if (!ret)
463 return NULL;
464
465 ret->com.res_id = id;
466 ret->com.state = RES_EQ_RESERVED;
467
468 return &ret->com;
469 }
470
471 static struct res_common *alloc_cq_tr(int id)
472 {
473 struct res_cq *ret;
474
475 ret = kzalloc(sizeof *ret, GFP_KERNEL);
476 if (!ret)
477 return NULL;
478
479 ret->com.res_id = id;
480 ret->com.state = RES_CQ_ALLOCATED;
481 atomic_set(&ret->ref_count, 0);
482
483 return &ret->com;
484 }
485
486 static struct res_common *alloc_srq_tr(int id)
487 {
488 struct res_srq *ret;
489
490 ret = kzalloc(sizeof *ret, GFP_KERNEL);
491 if (!ret)
492 return NULL;
493
494 ret->com.res_id = id;
495 ret->com.state = RES_SRQ_ALLOCATED;
496 atomic_set(&ret->ref_count, 0);
497
498 return &ret->com;
499 }
500
501 static struct res_common *alloc_counter_tr(int id)
502 {
503 struct res_counter *ret;
504
505 ret = kzalloc(sizeof *ret, GFP_KERNEL);
506 if (!ret)
507 return NULL;
508
509 ret->com.res_id = id;
510 ret->com.state = RES_COUNTER_ALLOCATED;
511
512 return &ret->com;
513 }
514
515 static struct res_common *alloc_xrcdn_tr(int id)
516 {
517 struct res_xrcdn *ret;
518
519 ret = kzalloc(sizeof *ret, GFP_KERNEL);
520 if (!ret)
521 return NULL;
522
523 ret->com.res_id = id;
524 ret->com.state = RES_XRCD_ALLOCATED;
525
526 return &ret->com;
527 }
528
529 static struct res_common *alloc_fs_rule_tr(u64 id)
530 {
531 struct res_fs_rule *ret;
532
533 ret = kzalloc(sizeof *ret, GFP_KERNEL);
534 if (!ret)
535 return NULL;
536
537 ret->com.res_id = id;
538 ret->com.state = RES_FS_RULE_ALLOCATED;
539
540 return &ret->com;
541 }
542
543 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
544 int extra)
545 {
546 struct res_common *ret;
547
548 switch (type) {
549 case RES_QP:
550 ret = alloc_qp_tr(id);
551 break;
552 case RES_MPT:
553 ret = alloc_mpt_tr(id, extra);
554 break;
555 case RES_MTT:
556 ret = alloc_mtt_tr(id, extra);
557 break;
558 case RES_EQ:
559 ret = alloc_eq_tr(id);
560 break;
561 case RES_CQ:
562 ret = alloc_cq_tr(id);
563 break;
564 case RES_SRQ:
565 ret = alloc_srq_tr(id);
566 break;
567 case RES_MAC:
568 printk(KERN_ERR "implementation missing\n");
569 return NULL;
570 case RES_COUNTER:
571 ret = alloc_counter_tr(id);
572 break;
573 case RES_XRCD:
574 ret = alloc_xrcdn_tr(id);
575 break;
576 case RES_FS_RULE:
577 ret = alloc_fs_rule_tr(id);
578 break;
579 default:
580 return NULL;
581 }
582 if (ret)
583 ret->owner = slave;
584
585 return ret;
586 }
587
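/*
 * Create tracker entries for 'count' consecutive resource ids starting at
 * 'base', insert them into the per-type tree and append them to the slave's
 * resource list.  On any failure the whole range is rolled back.
 */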
588 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
589 enum mlx4_resource type, int extra)
590 {
591 int i;
592 int err;
593 struct mlx4_priv *priv = mlx4_priv(dev);
594 struct res_common **res_arr;
595 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
596 struct rb_root *root = &tracker->res_tree[type];
597
598 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
599 if (!res_arr)
600 return -ENOMEM;
601
602 for (i = 0; i < count; ++i) {
603 res_arr[i] = alloc_tr(base + i, type, slave, extra);
604 if (!res_arr[i]) {
605 for (--i; i >= 0; --i)
606 kfree(res_arr[i]);
607
608 kfree(res_arr);
609 return -ENOMEM;
610 }
611 }
612
613 spin_lock_irq(mlx4_tlock(dev));
614 for (i = 0; i < count; ++i) {
615 if (find_res(dev, base + i, type)) {
616 err = -EEXIST;
617 goto undo;
618 }
619 err = res_tracker_insert(root, res_arr[i]);
620 if (err)
621 goto undo;
622 list_add_tail(&res_arr[i]->list,
623 &tracker->slave_list[slave].res_list[type]);
624 }
625 spin_unlock_irq(mlx4_tlock(dev));
626 kfree(res_arr);
627
628 return 0;
629
630 undo:
631 for (--i; i >= base; --i)
632 rb_erase(&res_arr[i]->node, root);
633
634 spin_unlock_irq(mlx4_tlock(dev));
635
636 for (i = 0; i < count; ++i)
637 kfree(res_arr[i]);
638
639 kfree(res_arr);
640
641 return err;
642 }
643
644 static int remove_qp_ok(struct res_qp *res)
645 {
646 if (res->com.state == RES_QP_BUSY)
647 return -EBUSY;
648 else if (res->com.state != RES_QP_RESERVED)
649 return -EPERM;
650
651 return 0;
652 }
653
654 static int remove_mtt_ok(struct res_mtt *res, int order)
655 {
656 if (res->com.state == RES_MTT_BUSY ||
657 atomic_read(&res->ref_count)) {
658 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
659 __func__, __LINE__,
660 mtt_states_str(res->com.state),
661 atomic_read(&res->ref_count));
662 return -EBUSY;
663 } else if (res->com.state != RES_MTT_ALLOCATED)
664 return -EPERM;
665 else if (res->order != order)
666 return -EINVAL;
667
668 return 0;
669 }
670
671 static int remove_mpt_ok(struct res_mpt *res)
672 {
673 if (res->com.state == RES_MPT_BUSY)
674 return -EBUSY;
675 else if (res->com.state != RES_MPT_RESERVED)
676 return -EPERM;
677
678 return 0;
679 }
680
681 static int remove_eq_ok(struct res_eq *res)
682 {
683 	if (res->com.state == RES_EQ_BUSY)
684 		return -EBUSY;
685 	else if (res->com.state != RES_EQ_RESERVED)
686 return -EPERM;
687
688 return 0;
689 }
690
691 static int remove_counter_ok(struct res_counter *res)
692 {
693 if (res->com.state == RES_COUNTER_BUSY)
694 return -EBUSY;
695 else if (res->com.state != RES_COUNTER_ALLOCATED)
696 return -EPERM;
697
698 return 0;
699 }
700
701 static int remove_xrcdn_ok(struct res_xrcdn *res)
702 {
703 if (res->com.state == RES_XRCD_BUSY)
704 return -EBUSY;
705 else if (res->com.state != RES_XRCD_ALLOCATED)
706 return -EPERM;
707
708 return 0;
709 }
710
711 static int remove_fs_rule_ok(struct res_fs_rule *res)
712 {
713 if (res->com.state == RES_FS_RULE_BUSY)
714 return -EBUSY;
715 else if (res->com.state != RES_FS_RULE_ALLOCATED)
716 return -EPERM;
717
718 return 0;
719 }
720
721 static int remove_cq_ok(struct res_cq *res)
722 {
723 if (res->com.state == RES_CQ_BUSY)
724 return -EBUSY;
725 else if (res->com.state != RES_CQ_ALLOCATED)
726 return -EPERM;
727
728 return 0;
729 }
730
731 static int remove_srq_ok(struct res_srq *res)
732 {
733 if (res->com.state == RES_SRQ_BUSY)
734 return -EBUSY;
735 else if (res->com.state != RES_SRQ_ALLOCATED)
736 return -EPERM;
737
738 return 0;
739 }
740
741 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
742 {
743 switch (type) {
744 case RES_QP:
745 return remove_qp_ok((struct res_qp *)res);
746 case RES_CQ:
747 return remove_cq_ok((struct res_cq *)res);
748 case RES_SRQ:
749 return remove_srq_ok((struct res_srq *)res);
750 case RES_MPT:
751 return remove_mpt_ok((struct res_mpt *)res);
752 case RES_MTT:
753 return remove_mtt_ok((struct res_mtt *)res, extra);
754 case RES_MAC:
755 return -ENOSYS;
756 case RES_EQ:
757 return remove_eq_ok((struct res_eq *)res);
758 case RES_COUNTER:
759 return remove_counter_ok((struct res_counter *)res);
760 case RES_XRCD:
761 return remove_xrcdn_ok((struct res_xrcdn *)res);
762 case RES_FS_RULE:
763 return remove_fs_rule_ok((struct res_fs_rule *)res);
764 default:
765 return -EINVAL;
766 }
767 }
768
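/*
 * Remove 'count' consecutive resource ids starting at 'base' from the
 * tracker.  The whole range is validated first (existence, ownership and the
 * type-specific remove_*_ok() check), so the removal is all or nothing.
 */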
769 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
770 enum mlx4_resource type, int extra)
771 {
772 u64 i;
773 int err;
774 struct mlx4_priv *priv = mlx4_priv(dev);
775 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
776 struct res_common *r;
777
778 spin_lock_irq(mlx4_tlock(dev));
779 for (i = base; i < base + count; ++i) {
780 r = res_tracker_lookup(&tracker->res_tree[type], i);
781 if (!r) {
782 err = -ENOENT;
783 goto out;
784 }
785 if (r->owner != slave) {
786 err = -EPERM;
787 goto out;
788 }
789 err = remove_ok(r, type, extra);
790 if (err)
791 goto out;
792 }
793
794 for (i = base; i < base + count; ++i) {
795 r = res_tracker_lookup(&tracker->res_tree[type], i);
796 rb_erase(&r->node, &tracker->res_tree[type]);
797 list_del(&r->list);
798 kfree(r);
799 }
800 err = 0;
801
802 out:
803 spin_unlock_irq(mlx4_tlock(dev));
804
805 return err;
806 }
807
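/*
 * Start a QP state transition: check that moving to 'state' is legal from
 * the current state, then park the entry in RES_QP_BUSY.  The transition is
 * committed with res_end_move() or rolled back with res_abort_move().
 */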
808 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
809 enum res_qp_states state, struct res_qp **qp,
810 int alloc)
811 {
812 struct mlx4_priv *priv = mlx4_priv(dev);
813 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
814 struct res_qp *r;
815 int err = 0;
816
817 spin_lock_irq(mlx4_tlock(dev));
818 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
819 if (!r)
820 err = -ENOENT;
821 else if (r->com.owner != slave)
822 err = -EPERM;
823 else {
824 switch (state) {
825 case RES_QP_BUSY:
826 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
827 __func__, r->com.res_id);
828 err = -EBUSY;
829 break;
830
831 case RES_QP_RESERVED:
832 if (r->com.state == RES_QP_MAPPED && !alloc)
833 break;
834
835 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
836 err = -EINVAL;
837 break;
838
839 case RES_QP_MAPPED:
840 if ((r->com.state == RES_QP_RESERVED && alloc) ||
841 r->com.state == RES_QP_HW)
842 break;
843 else {
844 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
845 r->com.res_id);
846 err = -EINVAL;
847 }
848
849 break;
850
851 case RES_QP_HW:
852 if (r->com.state != RES_QP_MAPPED)
853 err = -EINVAL;
854 break;
855 default:
856 err = -EINVAL;
857 }
858
859 if (!err) {
860 r->com.from_state = r->com.state;
861 r->com.to_state = state;
862 r->com.state = RES_QP_BUSY;
863 if (qp)
864 *qp = r;
865 }
866 }
867
868 spin_unlock_irq(mlx4_tlock(dev));
869
870 return err;
871 }
872
873 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
874 enum res_mpt_states state, struct res_mpt **mpt)
875 {
876 struct mlx4_priv *priv = mlx4_priv(dev);
877 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
878 struct res_mpt *r;
879 int err = 0;
880
881 spin_lock_irq(mlx4_tlock(dev));
882 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
883 if (!r)
884 err = -ENOENT;
885 else if (r->com.owner != slave)
886 err = -EPERM;
887 else {
888 switch (state) {
889 case RES_MPT_BUSY:
890 err = -EINVAL;
891 break;
892
893 case RES_MPT_RESERVED:
894 if (r->com.state != RES_MPT_MAPPED)
895 err = -EINVAL;
896 break;
897
898 case RES_MPT_MAPPED:
899 if (r->com.state != RES_MPT_RESERVED &&
900 r->com.state != RES_MPT_HW)
901 err = -EINVAL;
902 break;
903
904 case RES_MPT_HW:
905 if (r->com.state != RES_MPT_MAPPED)
906 err = -EINVAL;
907 break;
908 default:
909 err = -EINVAL;
910 }
911
912 if (!err) {
913 r->com.from_state = r->com.state;
914 r->com.to_state = state;
915 r->com.state = RES_MPT_BUSY;
916 if (mpt)
917 *mpt = r;
918 }
919 }
920
921 spin_unlock_irq(mlx4_tlock(dev));
922
923 return err;
924 }
925
926 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
927 enum res_eq_states state, struct res_eq **eq)
928 {
929 struct mlx4_priv *priv = mlx4_priv(dev);
930 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
931 struct res_eq *r;
932 int err = 0;
933
934 spin_lock_irq(mlx4_tlock(dev));
935 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
936 if (!r)
937 err = -ENOENT;
938 else if (r->com.owner != slave)
939 err = -EPERM;
940 else {
941 switch (state) {
942 case RES_EQ_BUSY:
943 err = -EINVAL;
944 break;
945
946 case RES_EQ_RESERVED:
947 if (r->com.state != RES_EQ_HW)
948 err = -EINVAL;
949 break;
950
951 case RES_EQ_HW:
952 if (r->com.state != RES_EQ_RESERVED)
953 err = -EINVAL;
954 break;
955
956 default:
957 err = -EINVAL;
958 }
959
960 if (!err) {
961 r->com.from_state = r->com.state;
962 r->com.to_state = state;
963 r->com.state = RES_EQ_BUSY;
964 if (eq)
965 *eq = r;
966 }
967 }
968
969 spin_unlock_irq(mlx4_tlock(dev));
970
971 return err;
972 }
973
974 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
975 enum res_cq_states state, struct res_cq **cq)
976 {
977 struct mlx4_priv *priv = mlx4_priv(dev);
978 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
979 struct res_cq *r;
980 int err;
981
982 spin_lock_irq(mlx4_tlock(dev));
983 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
984 if (!r)
985 err = -ENOENT;
986 else if (r->com.owner != slave)
987 err = -EPERM;
988 else {
989 switch (state) {
990 case RES_CQ_BUSY:
991 err = -EBUSY;
992 break;
993
994 case RES_CQ_ALLOCATED:
995 if (r->com.state != RES_CQ_HW)
996 err = -EINVAL;
997 else if (atomic_read(&r->ref_count))
998 err = -EBUSY;
999 else
1000 err = 0;
1001 break;
1002
1003 case RES_CQ_HW:
1004 if (r->com.state != RES_CQ_ALLOCATED)
1005 err = -EINVAL;
1006 else
1007 err = 0;
1008 break;
1009
1010 default:
1011 err = -EINVAL;
1012 }
1013
1014 if (!err) {
1015 r->com.from_state = r->com.state;
1016 r->com.to_state = state;
1017 r->com.state = RES_CQ_BUSY;
1018 if (cq)
1019 *cq = r;
1020 }
1021 }
1022
1023 spin_unlock_irq(mlx4_tlock(dev));
1024
1025 return err;
1026 }
1027
1028 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1029 enum res_cq_states state, struct res_srq **srq)
1030 {
1031 struct mlx4_priv *priv = mlx4_priv(dev);
1032 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1033 struct res_srq *r;
1034 int err = 0;
1035
1036 spin_lock_irq(mlx4_tlock(dev));
1037 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1038 if (!r)
1039 err = -ENOENT;
1040 else if (r->com.owner != slave)
1041 err = -EPERM;
1042 else {
1043 switch (state) {
1044 case RES_SRQ_BUSY:
1045 err = -EINVAL;
1046 break;
1047
1048 case RES_SRQ_ALLOCATED:
1049 if (r->com.state != RES_SRQ_HW)
1050 err = -EINVAL;
1051 else if (atomic_read(&r->ref_count))
1052 err = -EBUSY;
1053 break;
1054
1055 case RES_SRQ_HW:
1056 if (r->com.state != RES_SRQ_ALLOCATED)
1057 err = -EINVAL;
1058 break;
1059
1060 default:
1061 err = -EINVAL;
1062 }
1063
1064 if (!err) {
1065 r->com.from_state = r->com.state;
1066 r->com.to_state = state;
1067 r->com.state = RES_SRQ_BUSY;
1068 if (srq)
1069 *srq = r;
1070 }
1071 }
1072
1073 spin_unlock_irq(mlx4_tlock(dev));
1074
1075 return err;
1076 }
1077
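/*
 * Roll back a transition started by one of the *_res_start_move_to()
 * helpers by restoring the saved from_state; res_end_move() below commits
 * the transition by moving to the saved to_state instead.
 */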
1078 static void res_abort_move(struct mlx4_dev *dev, int slave,
1079 enum mlx4_resource type, int id)
1080 {
1081 struct mlx4_priv *priv = mlx4_priv(dev);
1082 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1083 struct res_common *r;
1084
1085 spin_lock_irq(mlx4_tlock(dev));
1086 r = res_tracker_lookup(&tracker->res_tree[type], id);
1087 if (r && (r->owner == slave))
1088 r->state = r->from_state;
1089 spin_unlock_irq(mlx4_tlock(dev));
1090 }
1091
1092 static void res_end_move(struct mlx4_dev *dev, int slave,
1093 enum mlx4_resource type, int id)
1094 {
1095 struct mlx4_priv *priv = mlx4_priv(dev);
1096 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1097 struct res_common *r;
1098
1099 spin_lock_irq(mlx4_tlock(dev));
1100 r = res_tracker_lookup(&tracker->res_tree[type], id);
1101 if (r && (r->owner == slave))
1102 r->state = r->to_state;
1103 spin_unlock_irq(mlx4_tlock(dev));
1104 }
1105
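/*
 * A QP number is treated as "reserved" (e.g. a proxy or tunnel QP) if it
 * falls in the device's reserved QP area and the caller is the master or
 * the slave whose proxy QP range contains it.
 */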
1106 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1107 {
1108 return mlx4_is_qp_reserved(dev, qpn) &&
1109 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1110 }
1111
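/*
 * QP leg of the ALLOC_RES command: RES_OP_RESERVE reserves a range of QP
 * numbers for the slave, RES_OP_MAP_ICM maps ICM for one QP and moves it to
 * RES_QP_MAPPED.  Reserved (proxy/tunnel) QPs get a tracker entry but no
 * ICM allocation of their own.
 */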
1112 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1113 u64 in_param, u64 *out_param)
1114 {
1115 int err;
1116 int count;
1117 int align;
1118 int base;
1119 int qpn;
1120
1121 switch (op) {
1122 case RES_OP_RESERVE:
1123 count = get_param_l(&in_param);
1124 align = get_param_h(&in_param);
1125 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1126 if (err)
1127 return err;
1128
1129 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1130 if (err) {
1131 __mlx4_qp_release_range(dev, base, count);
1132 return err;
1133 }
1134 set_param_l(out_param, base);
1135 break;
1136 case RES_OP_MAP_ICM:
1137 qpn = get_param_l(&in_param) & 0x7fffff;
1138 if (valid_reserved(dev, slave, qpn)) {
1139 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1140 if (err)
1141 return err;
1142 }
1143
1144 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1145 NULL, 1);
1146 if (err)
1147 return err;
1148
1149 if (!valid_reserved(dev, slave, qpn)) {
1150 err = __mlx4_qp_alloc_icm(dev, qpn);
1151 if (err) {
1152 res_abort_move(dev, slave, RES_QP, qpn);
1153 return err;
1154 }
1155 }
1156
1157 res_end_move(dev, slave, RES_QP, qpn);
1158 break;
1159
1160 default:
1161 err = -EINVAL;
1162 break;
1163 }
1164 return err;
1165 }
1166
1167 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1168 u64 in_param, u64 *out_param)
1169 {
1170 int err = -EINVAL;
1171 int base;
1172 int order;
1173
1174 if (op != RES_OP_RESERVE_AND_MAP)
1175 return err;
1176
1177 order = get_param_l(&in_param);
1178 base = __mlx4_alloc_mtt_range(dev, order);
1179 if (base == -1)
1180 return -ENOMEM;
1181
1182 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1183 if (err)
1184 __mlx4_free_mtt_range(dev, base, order);
1185 else
1186 set_param_l(out_param, base);
1187
1188 return err;
1189 }
1190
1191 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1192 u64 in_param, u64 *out_param)
1193 {
1194 int err = -EINVAL;
1195 int index;
1196 int id;
1197 struct res_mpt *mpt;
1198
1199 switch (op) {
1200 case RES_OP_RESERVE:
1201 index = __mlx4_mr_reserve(dev);
1202 if (index == -1)
1203 break;
1204 id = index & mpt_mask(dev);
1205
1206 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1207 if (err) {
1208 __mlx4_mr_release(dev, index);
1209 break;
1210 }
1211 set_param_l(out_param, index);
1212 break;
1213 case RES_OP_MAP_ICM:
1214 index = get_param_l(&in_param);
1215 id = index & mpt_mask(dev);
1216 err = mr_res_start_move_to(dev, slave, id,
1217 RES_MPT_MAPPED, &mpt);
1218 if (err)
1219 return err;
1220
1221 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1222 if (err) {
1223 res_abort_move(dev, slave, RES_MPT, id);
1224 return err;
1225 }
1226
1227 res_end_move(dev, slave, RES_MPT, id);
1228 break;
1229 }
1230 return err;
1231 }
1232
1233 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1234 u64 in_param, u64 *out_param)
1235 {
1236 int cqn;
1237 int err;
1238
1239 switch (op) {
1240 case RES_OP_RESERVE_AND_MAP:
1241 err = __mlx4_cq_alloc_icm(dev, &cqn);
1242 if (err)
1243 break;
1244
1245 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1246 if (err) {
1247 __mlx4_cq_free_icm(dev, cqn);
1248 break;
1249 }
1250
1251 set_param_l(out_param, cqn);
1252 break;
1253
1254 default:
1255 err = -EINVAL;
1256 }
1257
1258 return err;
1259 }
1260
1261 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1262 u64 in_param, u64 *out_param)
1263 {
1264 int srqn;
1265 int err;
1266
1267 switch (op) {
1268 case RES_OP_RESERVE_AND_MAP:
1269 err = __mlx4_srq_alloc_icm(dev, &srqn);
1270 if (err)
1271 break;
1272
1273 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1274 if (err) {
1275 __mlx4_srq_free_icm(dev, srqn);
1276 break;
1277 }
1278
1279 set_param_l(out_param, srqn);
1280 break;
1281
1282 default:
1283 err = -EINVAL;
1284 }
1285
1286 return err;
1287 }
1288
1289 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1290 {
1291 struct mlx4_priv *priv = mlx4_priv(dev);
1292 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1293 struct mac_res *res;
1294
1295 res = kzalloc(sizeof *res, GFP_KERNEL);
1296 if (!res)
1297 return -ENOMEM;
1298 res->mac = mac;
1299 res->port = (u8) port;
1300 list_add_tail(&res->list,
1301 &tracker->slave_list[slave].res_list[RES_MAC]);
1302 return 0;
1303 }
1304
1305 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1306 int port)
1307 {
1308 struct mlx4_priv *priv = mlx4_priv(dev);
1309 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1310 struct list_head *mac_list =
1311 &tracker->slave_list[slave].res_list[RES_MAC];
1312 struct mac_res *res, *tmp;
1313
1314 list_for_each_entry_safe(res, tmp, mac_list, list) {
1315 if (res->mac == mac && res->port == (u8) port) {
1316 list_del(&res->list);
1317 kfree(res);
1318 break;
1319 }
1320 }
1321 }
1322
1323 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1324 {
1325 struct mlx4_priv *priv = mlx4_priv(dev);
1326 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1327 struct list_head *mac_list =
1328 &tracker->slave_list[slave].res_list[RES_MAC];
1329 struct mac_res *res, *tmp;
1330
1331 list_for_each_entry_safe(res, tmp, mac_list, list) {
1332 list_del(&res->list);
1333 __mlx4_unregister_mac(dev, res->port, res->mac);
1334 kfree(res);
1335 }
1336 }
1337
1338 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1339 u64 in_param, u64 *out_param)
1340 {
1341 int err = -EINVAL;
1342 int port;
1343 u64 mac;
1344
1345 if (op != RES_OP_RESERVE_AND_MAP)
1346 return err;
1347
1348 port = get_param_l(out_param);
1349 mac = in_param;
1350
1351 err = __mlx4_register_mac(dev, port, mac);
1352 if (err >= 0) {
1353 set_param_l(out_param, err);
1354 err = 0;
1355 }
1356
1357 if (!err) {
1358 err = mac_add_to_slave(dev, slave, mac, port);
1359 if (err)
1360 __mlx4_unregister_mac(dev, port, mac);
1361 }
1362 return err;
1363 }
1364
1365 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1366 u64 in_param, u64 *out_param)
1367 {
1368 return 0;
1369 }
1370
1371 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1372 u64 in_param, u64 *out_param)
1373 {
1374 u32 index;
1375 int err;
1376
1377 if (op != RES_OP_RESERVE)
1378 return -EINVAL;
1379
1380 err = __mlx4_counter_alloc(dev, &index);
1381 if (err)
1382 return err;
1383
1384 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1385 if (err)
1386 __mlx4_counter_free(dev, index);
1387 else
1388 set_param_l(out_param, index);
1389
1390 return err;
1391 }
1392
1393 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1394 u64 in_param, u64 *out_param)
1395 {
1396 u32 xrcdn;
1397 int err;
1398
1399 if (op != RES_OP_RESERVE)
1400 return -EINVAL;
1401
1402 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1403 if (err)
1404 return err;
1405
1406 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1407 if (err)
1408 __mlx4_xrcd_free(dev, xrcdn);
1409 else
1410 set_param_l(out_param, xrcdn);
1411
1412 return err;
1413 }
1414
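/*
 * Dispatch a slave's ALLOC_RES command to the per-resource-type allocation
 * helper selected by vhcr->in_modifier.
 */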
1415 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1416 struct mlx4_vhcr *vhcr,
1417 struct mlx4_cmd_mailbox *inbox,
1418 struct mlx4_cmd_mailbox *outbox,
1419 struct mlx4_cmd_info *cmd)
1420 {
1421 int err;
1422 int alop = vhcr->op_modifier;
1423
1424 switch (vhcr->in_modifier) {
1425 case RES_QP:
1426 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1427 vhcr->in_param, &vhcr->out_param);
1428 break;
1429
1430 case RES_MTT:
1431 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1432 vhcr->in_param, &vhcr->out_param);
1433 break;
1434
1435 case RES_MPT:
1436 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1437 vhcr->in_param, &vhcr->out_param);
1438 break;
1439
1440 case RES_CQ:
1441 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1442 vhcr->in_param, &vhcr->out_param);
1443 break;
1444
1445 case RES_SRQ:
1446 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1447 vhcr->in_param, &vhcr->out_param);
1448 break;
1449
1450 case RES_MAC:
1451 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1452 vhcr->in_param, &vhcr->out_param);
1453 break;
1454
1455 case RES_VLAN:
1456 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1457 vhcr->in_param, &vhcr->out_param);
1458 break;
1459
1460 case RES_COUNTER:
1461 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1462 vhcr->in_param, &vhcr->out_param);
1463 break;
1464
1465 case RES_XRCD:
1466 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1467 vhcr->in_param, &vhcr->out_param);
1468 break;
1469
1470 default:
1471 err = -EINVAL;
1472 break;
1473 }
1474
1475 return err;
1476 }
1477
1478 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1479 u64 in_param)
1480 {
1481 int err;
1482 int count;
1483 int base;
1484 int qpn;
1485
1486 switch (op) {
1487 case RES_OP_RESERVE:
1488 base = get_param_l(&in_param) & 0x7fffff;
1489 count = get_param_h(&in_param);
1490 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1491 if (err)
1492 break;
1493 __mlx4_qp_release_range(dev, base, count);
1494 break;
1495 case RES_OP_MAP_ICM:
1496 qpn = get_param_l(&in_param) & 0x7fffff;
1497 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1498 NULL, 0);
1499 if (err)
1500 return err;
1501
1502 if (!valid_reserved(dev, slave, qpn))
1503 __mlx4_qp_free_icm(dev, qpn);
1504
1505 res_end_move(dev, slave, RES_QP, qpn);
1506
1507 if (valid_reserved(dev, slave, qpn))
1508 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1509 break;
1510 default:
1511 err = -EINVAL;
1512 break;
1513 }
1514 return err;
1515 }
1516
1517 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1518 u64 in_param, u64 *out_param)
1519 {
1520 int err = -EINVAL;
1521 int base;
1522 int order;
1523
1524 if (op != RES_OP_RESERVE_AND_MAP)
1525 return err;
1526
1527 base = get_param_l(&in_param);
1528 order = get_param_h(&in_param);
1529 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1530 if (!err)
1531 __mlx4_free_mtt_range(dev, base, order);
1532 return err;
1533 }
1534
1535 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1536 u64 in_param)
1537 {
1538 int err = -EINVAL;
1539 int index;
1540 int id;
1541 struct res_mpt *mpt;
1542
1543 switch (op) {
1544 case RES_OP_RESERVE:
1545 index = get_param_l(&in_param);
1546 id = index & mpt_mask(dev);
1547 err = get_res(dev, slave, id, RES_MPT, &mpt);
1548 if (err)
1549 break;
1550 index = mpt->key;
1551 put_res(dev, slave, id, RES_MPT);
1552
1553 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1554 if (err)
1555 break;
1556 __mlx4_mr_release(dev, index);
1557 break;
1558 case RES_OP_MAP_ICM:
1559 index = get_param_l(&in_param);
1560 id = index & mpt_mask(dev);
1561 err = mr_res_start_move_to(dev, slave, id,
1562 RES_MPT_RESERVED, &mpt);
1563 if (err)
1564 return err;
1565
1566 __mlx4_mr_free_icm(dev, mpt->key);
1567 res_end_move(dev, slave, RES_MPT, id);
1568 		return err;
1570 default:
1571 err = -EINVAL;
1572 break;
1573 }
1574 return err;
1575 }
1576
1577 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1578 u64 in_param, u64 *out_param)
1579 {
1580 int cqn;
1581 int err;
1582
1583 switch (op) {
1584 case RES_OP_RESERVE_AND_MAP:
1585 cqn = get_param_l(&in_param);
1586 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1587 if (err)
1588 break;
1589
1590 __mlx4_cq_free_icm(dev, cqn);
1591 break;
1592
1593 default:
1594 err = -EINVAL;
1595 break;
1596 }
1597
1598 return err;
1599 }
1600
1601 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1602 u64 in_param, u64 *out_param)
1603 {
1604 int srqn;
1605 int err;
1606
1607 switch (op) {
1608 case RES_OP_RESERVE_AND_MAP:
1609 srqn = get_param_l(&in_param);
1610 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1611 if (err)
1612 break;
1613
1614 __mlx4_srq_free_icm(dev, srqn);
1615 break;
1616
1617 default:
1618 err = -EINVAL;
1619 break;
1620 }
1621
1622 return err;
1623 }
1624
1625 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1626 u64 in_param, u64 *out_param)
1627 {
1628 int port;
1629 int err = 0;
1630
1631 switch (op) {
1632 case RES_OP_RESERVE_AND_MAP:
1633 port = get_param_l(out_param);
1634 mac_del_from_slave(dev, slave, in_param, port);
1635 __mlx4_unregister_mac(dev, port, in_param);
1636 break;
1637 default:
1638 err = -EINVAL;
1639 break;
1640 }
1641
1642 return err;
1643
1644 }
1645
1646 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1647 u64 in_param, u64 *out_param)
1648 {
1649 return 0;
1650 }
1651
1652 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1653 u64 in_param, u64 *out_param)
1654 {
1655 int index;
1656 int err;
1657
1658 if (op != RES_OP_RESERVE)
1659 return -EINVAL;
1660
1661 index = get_param_l(&in_param);
1662 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1663 if (err)
1664 return err;
1665
1666 __mlx4_counter_free(dev, index);
1667
1668 return err;
1669 }
1670
1671 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1672 u64 in_param, u64 *out_param)
1673 {
1674 int xrcdn;
1675 int err;
1676
1677 if (op != RES_OP_RESERVE)
1678 return -EINVAL;
1679
1680 xrcdn = get_param_l(&in_param);
1681 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1682 if (err)
1683 return err;
1684
1685 __mlx4_xrcd_free(dev, xrcdn);
1686
1687 return err;
1688 }
1689
1690 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1691 struct mlx4_vhcr *vhcr,
1692 struct mlx4_cmd_mailbox *inbox,
1693 struct mlx4_cmd_mailbox *outbox,
1694 struct mlx4_cmd_info *cmd)
1695 {
1696 int err = -EINVAL;
1697 int alop = vhcr->op_modifier;
1698
1699 switch (vhcr->in_modifier) {
1700 case RES_QP:
1701 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1702 vhcr->in_param);
1703 break;
1704
1705 case RES_MTT:
1706 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1707 vhcr->in_param, &vhcr->out_param);
1708 break;
1709
1710 case RES_MPT:
1711 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1712 vhcr->in_param);
1713 break;
1714
1715 case RES_CQ:
1716 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1717 vhcr->in_param, &vhcr->out_param);
1718 break;
1719
1720 case RES_SRQ:
1721 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1722 vhcr->in_param, &vhcr->out_param);
1723 break;
1724
1725 case RES_MAC:
1726 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1727 vhcr->in_param, &vhcr->out_param);
1728 break;
1729
1730 case RES_VLAN:
1731 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1732 vhcr->in_param, &vhcr->out_param);
1733 break;
1734
1735 case RES_COUNTER:
1736 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1737 vhcr->in_param, &vhcr->out_param);
1738 break;
1739
1740 case RES_XRCD:
1741 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1742 vhcr->in_param, &vhcr->out_param);
1743
1744 default:
1745 break;
1746 }
1747 return err;
1748 }
1749
1750 /* ugly but other choices are uglier */
1751 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1752 {
1753 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1754 }
1755
1756 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1757 {
1758 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1759 }
1760
1761 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1762 {
1763 return be32_to_cpu(mpt->mtt_sz);
1764 }
1765
1766 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1767 {
1768 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1769 }
1770
1771 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1772 {
1773 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1774 }
1775
1776 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1777 {
1778 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1779 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1780 int log_sq_sride = qpc->sq_size_stride & 7;
1781 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1782 int log_rq_stride = qpc->rq_size_stride & 7;
1783 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1784 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1785 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1786 int sq_size;
1787 int rq_size;
1788 int total_pages;
1789 int total_mem;
1790 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1791
1792 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1793 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1794 total_mem = sq_size + rq_size;
1795 total_pages =
1796 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1797 page_shift);
1798
1799 return total_pages;
1800 }
1801
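/*
 * Check that the MTT range [start, start + size) claimed by a command lies
 * entirely within the tracked MTT allocation 'mtt'.
 */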
1802 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1803 int size, struct res_mtt *mtt)
1804 {
1805 int res_start = mtt->com.res_id;
1806 int res_size = (1 << mtt->order);
1807
1808 if (start < res_start || start + size > res_start + res_size)
1809 return -EPERM;
1810 return 0;
1811 }
1812
1813 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1814 struct mlx4_vhcr *vhcr,
1815 struct mlx4_cmd_mailbox *inbox,
1816 struct mlx4_cmd_mailbox *outbox,
1817 struct mlx4_cmd_info *cmd)
1818 {
1819 int err;
1820 int index = vhcr->in_modifier;
1821 struct res_mtt *mtt;
1822 struct res_mpt *mpt;
1823 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1824 int phys;
1825 int id;
1826
1827 id = index & mpt_mask(dev);
1828 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1829 if (err)
1830 return err;
1831
1832 phys = mr_phys_mpt(inbox->buf);
1833 if (!phys) {
1834 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1835 if (err)
1836 goto ex_abort;
1837
1838 err = check_mtt_range(dev, slave, mtt_base,
1839 mr_get_mtt_size(inbox->buf), mtt);
1840 if (err)
1841 goto ex_put;
1842
1843 mpt->mtt = mtt;
1844 }
1845
1846 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1847 if (err)
1848 goto ex_put;
1849
1850 if (!phys) {
1851 atomic_inc(&mtt->ref_count);
1852 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1853 }
1854
1855 res_end_move(dev, slave, RES_MPT, id);
1856 return 0;
1857
1858 ex_put:
1859 if (!phys)
1860 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1861 ex_abort:
1862 res_abort_move(dev, slave, RES_MPT, id);
1863
1864 return err;
1865 }
1866
1867 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1868 struct mlx4_vhcr *vhcr,
1869 struct mlx4_cmd_mailbox *inbox,
1870 struct mlx4_cmd_mailbox *outbox,
1871 struct mlx4_cmd_info *cmd)
1872 {
1873 int err;
1874 int index = vhcr->in_modifier;
1875 struct res_mpt *mpt;
1876 int id;
1877
1878 id = index & mpt_mask(dev);
1879 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1880 if (err)
1881 return err;
1882
1883 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1884 if (err)
1885 goto ex_abort;
1886
1887 if (mpt->mtt)
1888 atomic_dec(&mpt->mtt->ref_count);
1889
1890 res_end_move(dev, slave, RES_MPT, id);
1891 return 0;
1892
1893 ex_abort:
1894 res_abort_move(dev, slave, RES_MPT, id);
1895
1896 return err;
1897 }
1898
1899 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1900 struct mlx4_vhcr *vhcr,
1901 struct mlx4_cmd_mailbox *inbox,
1902 struct mlx4_cmd_mailbox *outbox,
1903 struct mlx4_cmd_info *cmd)
1904 {
1905 int err;
1906 int index = vhcr->in_modifier;
1907 struct res_mpt *mpt;
1908 int id;
1909
1910 id = index & mpt_mask(dev);
1911 err = get_res(dev, slave, id, RES_MPT, &mpt);
1912 if (err)
1913 return err;
1914
1915 if (mpt->com.from_state != RES_MPT_HW) {
1916 err = -EBUSY;
1917 goto out;
1918 }
1919
1920 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1921
1922 out:
1923 put_res(dev, slave, id, RES_MPT);
1924 return err;
1925 }
1926
1927 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1928 {
1929 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1930 }
1931
1932 static int qp_get_scqn(struct mlx4_qp_context *qpc)
1933 {
1934 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1935 }
1936
1937 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1938 {
1939 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1940 }
1941
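/*
 * RST2INIT: move a QP into hardware ownership.  The MTT range, receive and
 * send CQs and (optionally) the SRQ referenced by the QP context are looked
 * up, validated and reference counted before the command is forwarded.
 */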
1942 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1943 struct mlx4_vhcr *vhcr,
1944 struct mlx4_cmd_mailbox *inbox,
1945 struct mlx4_cmd_mailbox *outbox,
1946 struct mlx4_cmd_info *cmd)
1947 {
1948 int err;
1949 int qpn = vhcr->in_modifier & 0x7fffff;
1950 struct res_mtt *mtt;
1951 struct res_qp *qp;
1952 struct mlx4_qp_context *qpc = inbox->buf + 8;
1953 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1954 int mtt_size = qp_get_mtt_size(qpc);
1955 struct res_cq *rcq;
1956 struct res_cq *scq;
1957 int rcqn = qp_get_rcqn(qpc);
1958 int scqn = qp_get_scqn(qpc);
1959 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1960 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1961 struct res_srq *srq;
1962 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1963
1964 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1965 if (err)
1966 return err;
1967 qp->local_qpn = local_qpn;
1968
1969 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1970 if (err)
1971 goto ex_abort;
1972
1973 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1974 if (err)
1975 goto ex_put_mtt;
1976
1977 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1978 if (err)
1979 goto ex_put_mtt;
1980
1981 if (scqn != rcqn) {
1982 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1983 if (err)
1984 goto ex_put_rcq;
1985 } else
1986 scq = rcq;
1987
1988 if (use_srq) {
1989 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1990 if (err)
1991 goto ex_put_scq;
1992 }
1993
1994 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1995 if (err)
1996 goto ex_put_srq;
1997 atomic_inc(&mtt->ref_count);
1998 qp->mtt = mtt;
1999 atomic_inc(&rcq->ref_count);
2000 qp->rcq = rcq;
2001 atomic_inc(&scq->ref_count);
2002 qp->scq = scq;
2003
2004 if (scqn != rcqn)
2005 put_res(dev, slave, scqn, RES_CQ);
2006
2007 if (use_srq) {
2008 atomic_inc(&srq->ref_count);
2009 put_res(dev, slave, srqn, RES_SRQ);
2010 qp->srq = srq;
2011 }
2012 put_res(dev, slave, rcqn, RES_CQ);
2013 put_res(dev, slave, mtt_base, RES_MTT);
2014 res_end_move(dev, slave, RES_QP, qpn);
2015
2016 return 0;
2017
2018 ex_put_srq:
2019 if (use_srq)
2020 put_res(dev, slave, srqn, RES_SRQ);
2021 ex_put_scq:
2022 if (scqn != rcqn)
2023 put_res(dev, slave, scqn, RES_CQ);
2024 ex_put_rcq:
2025 put_res(dev, slave, rcqn, RES_CQ);
2026 ex_put_mtt:
2027 put_res(dev, slave, mtt_base, RES_MTT);
2028 ex_abort:
2029 res_abort_move(dev, slave, RES_QP, qpn);
2030
2031 return err;
2032 }
2033
2034 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2035 {
2036 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2037 }
2038
2039 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2040 {
2041 int log_eq_size = eqc->log_eq_size & 0x1f;
2042 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2043
2044 if (log_eq_size + 5 < page_shift)
2045 return 1;
2046
2047 return 1 << (log_eq_size + 5 - page_shift);
2048 }
2049
2050 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2051 {
2052 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2053 }
2054
2055 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2056 {
2057 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2058 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2059
2060 if (log_cq_size + 5 < page_shift)
2061 return 1;
2062
2063 return 1 << (log_cq_size + 5 - page_shift);
2064 }
2065
2066 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2067 struct mlx4_vhcr *vhcr,
2068 struct mlx4_cmd_mailbox *inbox,
2069 struct mlx4_cmd_mailbox *outbox,
2070 struct mlx4_cmd_info *cmd)
2071 {
2072 int err;
2073 int eqn = vhcr->in_modifier;
2074 int res_id = (slave << 8) | eqn;
2075 struct mlx4_eq_context *eqc = inbox->buf;
2076 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2077 int mtt_size = eq_get_mtt_size(eqc);
2078 struct res_eq *eq;
2079 struct res_mtt *mtt;
2080
2081 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2082 if (err)
2083 return err;
2084 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2085 if (err)
2086 goto out_add;
2087
2088 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2089 if (err)
2090 goto out_move;
2091
2092 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2093 if (err)
2094 goto out_put;
2095
2096 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2097 if (err)
2098 goto out_put;
2099
2100 atomic_inc(&mtt->ref_count);
2101 eq->mtt = mtt;
2102 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2103 res_end_move(dev, slave, RES_EQ, res_id);
2104 return 0;
2105
2106 out_put:
2107 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2108 out_move:
2109 res_abort_move(dev, slave, RES_EQ, res_id);
2110 out_add:
2111 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2112 return err;
2113 }
2114
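/*
 * Find the MTT allocation owned by 'slave' that fully contains the range
 * [start, start + len) and mark it busy; the caller releases it again with
 * put_res().
 */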
2115 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2116 int len, struct res_mtt **res)
2117 {
2118 struct mlx4_priv *priv = mlx4_priv(dev);
2119 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2120 struct res_mtt *mtt;
2121 int err = -EINVAL;
2122
2123 spin_lock_irq(mlx4_tlock(dev));
2124 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2125 com.list) {
2126 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2127 *res = mtt;
2128 mtt->com.from_state = mtt->com.state;
2129 mtt->com.state = RES_MTT_BUSY;
2130 err = 0;
2131 break;
2132 }
2133 }
2134 spin_unlock_irq(mlx4_tlock(dev));
2135
2136 return err;
2137 }
2138
2139 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2140 struct mlx4_vhcr *vhcr,
2141 struct mlx4_cmd_mailbox *inbox,
2142 struct mlx4_cmd_mailbox *outbox,
2143 struct mlx4_cmd_info *cmd)
2144 {
2145 struct mlx4_mtt mtt;
2146 __be64 *page_list = inbox->buf;
2147 u64 *pg_list = (u64 *)page_list;
2148 int i;
2149 struct res_mtt *rmtt = NULL;
2150 int start = be64_to_cpu(page_list[0]);
2151 int npages = vhcr->in_modifier;
2152 int err;
2153
2154 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2155 if (err)
2156 return err;
2157
2158 /* Call the SW implementation of write_mtt:
2159 * - Prepare a dummy mtt struct
2160 	 * - Translate inbox contents to simple addresses in host endianness */
2161 mtt.offset = 0; /* TBD this is broken but I don't handle it since
2162 we don't really use it */
2163 mtt.order = 0;
2164 mtt.page_shift = 0;
2165 for (i = 0; i < npages; ++i)
2166 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2167
2168 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2169 ((u64 *)page_list + 2));
2170
2171 if (rmtt)
2172 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2173
2174 return err;
2175 }
2176
2177 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2178 struct mlx4_vhcr *vhcr,
2179 struct mlx4_cmd_mailbox *inbox,
2180 struct mlx4_cmd_mailbox *outbox,
2181 struct mlx4_cmd_info *cmd)
2182 {
2183 int eqn = vhcr->in_modifier;
2184 int res_id = eqn | (slave << 8);
2185 struct res_eq *eq;
2186 int err;
2187
2188 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2189 if (err)
2190 return err;
2191
2192 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2193 if (err)
2194 goto ex_abort;
2195
2196 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2197 if (err)
2198 goto ex_put;
2199
2200 atomic_dec(&eq->mtt->ref_count);
2201 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2202 res_end_move(dev, slave, RES_EQ, res_id);
2203 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2204
2205 return 0;
2206
2207 ex_put:
2208 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2209 ex_abort:
2210 res_abort_move(dev, slave, RES_EQ, res_id);
2211
2212 return err;
2213 }
2214
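/*
 * Deliver an event to a slave by writing the EQE into a mailbox and issuing
 * the GEN_EQE command on the event queue the slave registered for this
 * event type.
 */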
2215 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2216 {
2217 struct mlx4_priv *priv = mlx4_priv(dev);
2218 struct mlx4_slave_event_eq_info *event_eq;
2219 struct mlx4_cmd_mailbox *mailbox;
2220 u32 in_modifier = 0;
2221 int err;
2222 int res_id;
2223 struct res_eq *req;
2224
2225 if (!priv->mfunc.master.slave_state)
2226 return -EINVAL;
2227
2228 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2229
2230 /* Create the event only if the slave is registered */
2231 if (event_eq->eqn < 0)
2232 return 0;
2233
2234 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2235 res_id = (slave << 8) | event_eq->eqn;
2236 err = get_res(dev, slave, res_id, RES_EQ, &req);
2237 if (err)
2238 goto unlock;
2239
2240 if (req->com.from_state != RES_EQ_HW) {
2241 err = -EINVAL;
2242 goto put;
2243 }
2244
2245 mailbox = mlx4_alloc_cmd_mailbox(dev);
2246 if (IS_ERR(mailbox)) {
2247 err = PTR_ERR(mailbox);
2248 goto put;
2249 }
2250
2251 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2252 ++event_eq->token;
2253 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2254 }
2255
2256 memcpy(mailbox->buf, (u8 *) eqe, 28);
2257
2258 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2259
2260 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2261 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2262 MLX4_CMD_NATIVE);
2263
2264 put_res(dev, slave, res_id, RES_EQ);
2265 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2266 mlx4_free_cmd_mailbox(dev, mailbox);
2267 return err;
2268
2269 put:
2270 put_res(dev, slave, res_id, RES_EQ);
2271
2272 unlock:
2273 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2274 return err;
2275 }
2276
2277 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2278 struct mlx4_vhcr *vhcr,
2279 struct mlx4_cmd_mailbox *inbox,
2280 struct mlx4_cmd_mailbox *outbox,
2281 struct mlx4_cmd_info *cmd)
2282 {
2283 int eqn = vhcr->in_modifier;
2284 int res_id = eqn | (slave << 8);
2285 struct res_eq *eq;
2286 int err;
2287
2288 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2289 if (err)
2290 return err;
2291
2292 if (eq->com.from_state != RES_EQ_HW) {
2293 err = -EINVAL;
2294 goto ex_put;
2295 }
2296
2297 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2298
2299 ex_put:
2300 put_res(dev, slave, res_id, RES_EQ);
2301 return err;
2302 }
2303
2304 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2305 struct mlx4_vhcr *vhcr,
2306 struct mlx4_cmd_mailbox *inbox,
2307 struct mlx4_cmd_mailbox *outbox,
2308 struct mlx4_cmd_info *cmd)
2309 {
2310 int err;
2311 int cqn = vhcr->in_modifier;
2312 struct mlx4_cq_context *cqc = inbox->buf;
2313 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2314 struct res_cq *cq;
2315 struct res_mtt *mtt;
2316
2317 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2318 if (err)
2319 return err;
2320 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2321 if (err)
2322 goto out_move;
2323 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2324 if (err)
2325 goto out_put;
2326 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2327 if (err)
2328 goto out_put;
2329 atomic_inc(&mtt->ref_count);
2330 cq->mtt = mtt;
2331 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2332 res_end_move(dev, slave, RES_CQ, cqn);
2333 return 0;
2334
2335 out_put:
2336 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2337 out_move:
2338 res_abort_move(dev, slave, RES_CQ, cqn);
2339 return err;
2340 }
2341
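/*
 * HW2SW_CQ: return the CQ to SW ownership and drop the MTT reference taken
 * at SW2HW time.
 */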
2342 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2343 struct mlx4_vhcr *vhcr,
2344 struct mlx4_cmd_mailbox *inbox,
2345 struct mlx4_cmd_mailbox *outbox,
2346 struct mlx4_cmd_info *cmd)
2347 {
2348 int err;
2349 int cqn = vhcr->in_modifier;
2350 struct res_cq *cq;
2351
2352 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2353 if (err)
2354 return err;
2355 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2356 if (err)
2357 goto out_move;
2358 atomic_dec(&cq->mtt->ref_count);
2359 res_end_move(dev, slave, RES_CQ, cqn);
2360 return 0;
2361
2362 out_move:
2363 res_abort_move(dev, slave, RES_CQ, cqn);
2364 return err;
2365 }
2366
2367 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2368 struct mlx4_vhcr *vhcr,
2369 struct mlx4_cmd_mailbox *inbox,
2370 struct mlx4_cmd_mailbox *outbox,
2371 struct mlx4_cmd_info *cmd)
2372 {
2373 int cqn = vhcr->in_modifier;
2374 struct res_cq *cq;
2375 int err;
2376
2377 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2378 if (err)
2379 return err;
2380
2381 if (cq->com.from_state != RES_CQ_HW)
2382 goto ex_put;
2383
2384 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2385 ex_put:
2386 put_res(dev, slave, cqn, RES_CQ);
2387
2388 return err;
2389 }
2390
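/*
 * CQ resize (MODIFY_CQ with op_modifier == 0): validate the new MTT range,
 * pass the command to firmware and, on success, move the CQ's MTT reference
 * from the old range to the new one.
 */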
2391 static int handle_resize(struct mlx4_dev *dev, int slave,
2392 struct mlx4_vhcr *vhcr,
2393 struct mlx4_cmd_mailbox *inbox,
2394 struct mlx4_cmd_mailbox *outbox,
2395 struct mlx4_cmd_info *cmd,
2396 struct res_cq *cq)
2397 {
2398 int err;
2399 struct res_mtt *orig_mtt;
2400 struct res_mtt *mtt;
2401 struct mlx4_cq_context *cqc = inbox->buf;
2402 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2403
2404 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2405 if (err)
2406 return err;
2407
2408 if (orig_mtt != cq->mtt) {
2409 err = -EINVAL;
2410 goto ex_put;
2411 }
2412
2413 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2414 if (err)
2415 goto ex_put;
2416
2417 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2418 if (err)
2419 goto ex_put1;
2420 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2421 if (err)
2422 goto ex_put1;
2423 atomic_dec(&orig_mtt->ref_count);
2424 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2425 atomic_inc(&mtt->ref_count);
2426 cq->mtt = mtt;
2427 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2428 return 0;
2429
2430 ex_put1:
2431 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2432 ex_put:
2433 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2434
2435 return err;
2436
2437 }
2438
2439 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2440 struct mlx4_vhcr *vhcr,
2441 struct mlx4_cmd_mailbox *inbox,
2442 struct mlx4_cmd_mailbox *outbox,
2443 struct mlx4_cmd_info *cmd)
2444 {
2445 int cqn = vhcr->in_modifier;
2446 struct res_cq *cq;
2447 int err;
2448
2449 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2450 if (err)
2451 return err;
2452
2453 if (cq->com.from_state != RES_CQ_HW)
2454 goto ex_put;
2455
2456 if (vhcr->op_modifier == 0) {
2457 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2458 goto ex_put;
2459 }
2460
2461 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2462 ex_put:
2463 put_res(dev, slave, cqn, RES_CQ);
2464
2465 return err;
2466 }
2467
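/* Number of MTT entries spanned by the SRQ described in the context (at least 1). */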
2468 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2469 {
2470 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2471 int log_rq_stride = srqc->logstride & 7;
2472 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2473
2474 if (log_srq_size + log_rq_stride + 4 < page_shift)
2475 return 1;
2476
2477 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2478 }
2479
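/*
 * SW2HW_SRQ: check that the SRQ number in the context matches the command
 * modifier and that the MTT range belongs to the slave, then move the SRQ
 * to HW ownership with a reference held on its MTT.
 */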
2480 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2481 struct mlx4_vhcr *vhcr,
2482 struct mlx4_cmd_mailbox *inbox,
2483 struct mlx4_cmd_mailbox *outbox,
2484 struct mlx4_cmd_info *cmd)
2485 {
2486 int err;
2487 int srqn = vhcr->in_modifier;
2488 struct res_mtt *mtt;
2489 struct res_srq *srq;
2490 struct mlx4_srq_context *srqc = inbox->buf;
2491 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2492
2493 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2494 return -EINVAL;
2495
2496 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2497 if (err)
2498 return err;
2499 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2500 if (err)
2501 goto ex_abort;
2502 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2503 mtt);
2504 if (err)
2505 goto ex_put_mtt;
2506
2507 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2508 if (err)
2509 goto ex_put_mtt;
2510
2511 atomic_inc(&mtt->ref_count);
2512 srq->mtt = mtt;
2513 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2514 res_end_move(dev, slave, RES_SRQ, srqn);
2515 return 0;
2516
2517 ex_put_mtt:
2518 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2519 ex_abort:
2520 res_abort_move(dev, slave, RES_SRQ, srqn);
2521
2522 return err;
2523 }
2524
2525 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2526 struct mlx4_vhcr *vhcr,
2527 struct mlx4_cmd_mailbox *inbox,
2528 struct mlx4_cmd_mailbox *outbox,
2529 struct mlx4_cmd_info *cmd)
2530 {
2531 int err;
2532 int srqn = vhcr->in_modifier;
2533 struct res_srq *srq;
2534
2535 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2536 if (err)
2537 return err;
2538 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2539 if (err)
2540 goto ex_abort;
2541 atomic_dec(&srq->mtt->ref_count);
2542 if (srq->cq)
2543 atomic_dec(&srq->cq->ref_count);
2544 res_end_move(dev, slave, RES_SRQ, srqn);
2545
2546 return 0;
2547
2548 ex_abort:
2549 res_abort_move(dev, slave, RES_SRQ, srqn);
2550
2551 return err;
2552 }
2553
2554 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2555 struct mlx4_vhcr *vhcr,
2556 struct mlx4_cmd_mailbox *inbox,
2557 struct mlx4_cmd_mailbox *outbox,
2558 struct mlx4_cmd_info *cmd)
2559 {
2560 int err;
2561 int srqn = vhcr->in_modifier;
2562 struct res_srq *srq;
2563
2564 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2565 if (err)
2566 return err;
2567 if (srq->com.from_state != RES_SRQ_HW) {
2568 err = -EBUSY;
2569 goto out;
2570 }
2571 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2572 out:
2573 put_res(dev, slave, srqn, RES_SRQ);
2574 return err;
2575 }
2576
2577 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2578 struct mlx4_vhcr *vhcr,
2579 struct mlx4_cmd_mailbox *inbox,
2580 struct mlx4_cmd_mailbox *outbox,
2581 struct mlx4_cmd_info *cmd)
2582 {
2583 int err;
2584 int srqn = vhcr->in_modifier;
2585 struct res_srq *srq;
2586
2587 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2588 if (err)
2589 return err;
2590
2591 if (srq->com.from_state != RES_SRQ_HW) {
2592 err = -EBUSY;
2593 goto out;
2594 }
2595
2596 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2597 out:
2598 put_res(dev, slave, srqn, RES_SRQ);
2599 return err;
2600 }
2601
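/*
 * Generic QP command wrapper: the QP must already be in HW ownership,
 * otherwise -EBUSY; the command itself is simply forwarded to firmware.
 */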
2602 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2603 struct mlx4_vhcr *vhcr,
2604 struct mlx4_cmd_mailbox *inbox,
2605 struct mlx4_cmd_mailbox *outbox,
2606 struct mlx4_cmd_info *cmd)
2607 {
2608 int err;
2609 int qpn = vhcr->in_modifier & 0x7fffff;
2610 struct res_qp *qp;
2611
2612 err = get_res(dev, slave, qpn, RES_QP, &qp);
2613 if (err)
2614 return err;
2615 if (qp->com.from_state != RES_QP_HW) {
2616 err = -EBUSY;
2617 goto out;
2618 }
2619
2620 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2621 out:
2622 put_res(dev, slave, qpn, RES_QP);
2623 return err;
2624 }
2625
2626 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2627 struct mlx4_vhcr *vhcr,
2628 struct mlx4_cmd_mailbox *inbox,
2629 struct mlx4_cmd_mailbox *outbox,
2630 struct mlx4_cmd_info *cmd)
2631 {
2632 struct mlx4_qp_context *qpc = inbox->buf + 8;
2633
2634 update_ud_gid(dev, qpc, (u8)slave);
2635
2636 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2637 }
2638
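/*
 * 2RST_QP: move the QP back to the MAPPED state and drop the references it
 * held on its MTT, receive/send CQs and (optional) SRQ.
 */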
2639 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2640 struct mlx4_vhcr *vhcr,
2641 struct mlx4_cmd_mailbox *inbox,
2642 struct mlx4_cmd_mailbox *outbox,
2643 struct mlx4_cmd_info *cmd)
2644 {
2645 int err;
2646 int qpn = vhcr->in_modifier & 0x7fffff;
2647 struct res_qp *qp;
2648
2649 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2650 if (err)
2651 return err;
2652 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2653 if (err)
2654 goto ex_abort;
2655
2656 atomic_dec(&qp->mtt->ref_count);
2657 atomic_dec(&qp->rcq->ref_count);
2658 atomic_dec(&qp->scq->ref_count);
2659 if (qp->srq)
2660 atomic_dec(&qp->srq->ref_count);
2661 res_end_move(dev, slave, RES_QP, qpn);
2662 return 0;
2663
2664 ex_abort:
2665 res_abort_move(dev, slave, RES_QP, qpn);
2666
2667 return err;
2668 }
2669
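/*
 * Multicast attachments are mirrored in rqp->mcg_list so they can be
 * detached on the slave's behalf if it goes away without cleaning up.
 */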
2670 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2671 struct res_qp *rqp, u8 *gid)
2672 {
2673 struct res_gid *res;
2674
2675 list_for_each_entry(res, &rqp->mcg_list, list) {
2676 if (!memcmp(res->gid, gid, 16))
2677 return res;
2678 }
2679 return NULL;
2680 }
2681
2682 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2683 u8 *gid, enum mlx4_protocol prot,
2684 enum mlx4_steer_type steer)
2685 {
2686 struct res_gid *res;
2687 int err;
2688
2689 	res = kzalloc(sizeof(*res), GFP_KERNEL);
2690 if (!res)
2691 return -ENOMEM;
2692
2693 spin_lock_irq(&rqp->mcg_spl);
2694 if (find_gid(dev, slave, rqp, gid)) {
2695 kfree(res);
2696 err = -EEXIST;
2697 } else {
2698 memcpy(res->gid, gid, 16);
2699 res->prot = prot;
2700 res->steer = steer;
2701 list_add_tail(&res->list, &rqp->mcg_list);
2702 err = 0;
2703 }
2704 spin_unlock_irq(&rqp->mcg_spl);
2705
2706 return err;
2707 }
2708
2709 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2710 u8 *gid, enum mlx4_protocol prot,
2711 enum mlx4_steer_type steer)
2712 {
2713 struct res_gid *res;
2714 int err;
2715
2716 spin_lock_irq(&rqp->mcg_spl);
2717 res = find_gid(dev, slave, rqp, gid);
2718 if (!res || res->prot != prot || res->steer != steer)
2719 err = -EINVAL;
2720 else {
2721 list_del(&res->list);
2722 kfree(res);
2723 err = 0;
2724 }
2725 spin_unlock_irq(&rqp->mcg_spl);
2726
2727 return err;
2728 }
2729
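/*
 * QP_ATTACH/DETACH: record (or remove) the GID in the QP's tracked mcg list
 * before performing the actual steering attach/detach for the slave.
 */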
2730 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2731 struct mlx4_vhcr *vhcr,
2732 struct mlx4_cmd_mailbox *inbox,
2733 struct mlx4_cmd_mailbox *outbox,
2734 struct mlx4_cmd_info *cmd)
2735 {
2736 struct mlx4_qp qp; /* dummy for calling attach/detach */
2737 u8 *gid = inbox->buf;
2738 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2739 int err;
2740 int qpn;
2741 struct res_qp *rqp;
2742 int attach = vhcr->op_modifier;
2743 int block_loopback = vhcr->in_modifier >> 31;
2744 u8 steer_type_mask = 2;
2745 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2746
2747 qpn = vhcr->in_modifier & 0xffffff;
2748 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2749 if (err)
2750 return err;
2751
2752 qp.qpn = qpn;
2753 if (attach) {
2754 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2755 if (err)
2756 goto ex_put;
2757
2758 err = mlx4_qp_attach_common(dev, &qp, gid,
2759 block_loopback, prot, type);
2760 if (err)
2761 goto ex_rem;
2762 } else {
2763 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2764 if (err)
2765 goto ex_put;
2766 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2767 }
2768
2769 put_res(dev, slave, qpn, RES_QP);
2770 return 0;
2771
2772 ex_rem:
2773 /* ignore error return below, already in error */
2774 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2775 ex_put:
2776 put_res(dev, slave, qpn, RES_QP);
2777
2778 return err;
2779 }
2780
2781 /*
2782 * MAC validation for Flow Steering rules.
2783 * A VF can attach rules only with a MAC address that is assigned to it.
2784 */
2785 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2786 struct list_head *rlist)
2787 {
2788 struct mac_res *res, *tmp;
2789 __be64 be_mac;
2790
2791 /* make sure it isn't a multicast or broadcast MAC */
2792 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2793 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
2794 list_for_each_entry_safe(res, tmp, rlist, list) {
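/*
 * res->mac holds the MAC in its low 48 bits; shifting left by 16 and
 * converting to big endian lines the first ETH_ALEN bytes up with dst_mac.
 */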
2795 be_mac = cpu_to_be64(res->mac << 16);
2796 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
2797 return 0;
2798 }
2799 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
2800 eth_header->eth.dst_mac, slave);
2801 return -EINVAL;
2802 }
2803 return 0;
2804 }
2805
2806 /*
2807 * In case of a missing eth header, insert one carrying a MAC address
2808 * assigned to the VF.
2809 */
2810 static int add_eth_header(struct mlx4_dev *dev, int slave,
2811 struct mlx4_cmd_mailbox *inbox,
2812 struct list_head *rlist, int header_id)
2813 {
2814 struct mac_res *res, *tmp;
2815 u8 port;
2816 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
2817 struct mlx4_net_trans_rule_hw_eth *eth_header;
2818 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
2819 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
2820 __be64 be_mac = 0;
2821 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
2822
2823 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
2824 port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
2825 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
2826
2827 /* Clear a space in the inbox for eth header */
2828 switch (header_id) {
2829 case MLX4_NET_TRANS_RULE_ID_IPV4:
2830 ip_header =
2831 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
2832 memmove(ip_header, eth_header,
2833 sizeof(*ip_header) + sizeof(*l4_header));
2834 break;
2835 case MLX4_NET_TRANS_RULE_ID_TCP:
2836 case MLX4_NET_TRANS_RULE_ID_UDP:
2837 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
2838 (eth_header + 1);
2839 memmove(l4_header, eth_header, sizeof(*l4_header));
2840 break;
2841 default:
2842 return -EINVAL;
2843 }
2844 list_for_each_entry_safe(res, tmp, rlist, list) {
2845 if (port == res->port) {
2846 be_mac = cpu_to_be64(res->mac << 16);
2847 break;
2848 }
2849 }
2850 if (!be_mac) {
2851 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
2852 port);
2853 return -EINVAL;
2854 }
2855
2856 memset(eth_header, 0, sizeof(*eth_header));
2857 eth_header->size = sizeof(*eth_header) >> 2;
2858 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
2859 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
2860 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
2861
2862 return 0;
2863
2864 }
2865
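/*
 * Flow steering attach for a VF (device-managed steering only): validate or
 * synthesize the L2 header, execute the attach and register the returned
 * rule id in the tracker so it can be detached if the VF goes away.
 */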
2866 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2867 struct mlx4_vhcr *vhcr,
2868 struct mlx4_cmd_mailbox *inbox,
2869 struct mlx4_cmd_mailbox *outbox,
2870 struct mlx4_cmd_info *cmd)
2871 {
2872
2873 struct mlx4_priv *priv = mlx4_priv(dev);
2874 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2875 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
2876 int err;
2877 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
2878 struct _rule_hw *rule_header;
2879 int header_id;
2880
2881 if (dev->caps.steering_mode !=
2882 MLX4_STEERING_MODE_DEVICE_MANAGED)
2883 return -EOPNOTSUPP;
2884
2885 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
2886 rule_header = (struct _rule_hw *)(ctrl + 1);
2887 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
2888
2889 switch (header_id) {
2890 case MLX4_NET_TRANS_RULE_ID_ETH:
2891 if (validate_eth_header_mac(slave, rule_header, rlist))
2892 return -EINVAL;
2893 break;
2894 case MLX4_NET_TRANS_RULE_ID_IPV4:
2895 case MLX4_NET_TRANS_RULE_ID_TCP:
2896 case MLX4_NET_TRANS_RULE_ID_UDP:
2897 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
2898 if (add_eth_header(dev, slave, inbox, rlist, header_id))
2899 return -EINVAL;
2900 vhcr->in_modifier +=
2901 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
2902 break;
2903 default:
2904 pr_err("Corrupted mailbox.\n");
2905 return -EINVAL;
2906 }
2907
2908 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
2909 vhcr->in_modifier, 0,
2910 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2911 MLX4_CMD_NATIVE);
2912 if (err)
2913 return err;
2914
2915 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
2916 if (err) {
2917 mlx4_err(dev, "Failed to add flow steering resources\n");
2918 /* detach rule*/
2919 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2920 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2921 MLX4_CMD_NATIVE);
2922 }
2923 return err;
2924 }
2925
2926 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
2927 struct mlx4_vhcr *vhcr,
2928 struct mlx4_cmd_mailbox *inbox,
2929 struct mlx4_cmd_mailbox *outbox,
2930 struct mlx4_cmd_info *cmd)
2931 {
2932 int err;
2933
2934 if (dev->caps.steering_mode !=
2935 MLX4_STEERING_MODE_DEVICE_MANAGED)
2936 return -EOPNOTSUPP;
2937
2938 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
2939 if (err) {
2940 mlx4_err(dev, "Failed to remove flow steering resources\n");
2941 return err;
2942 }
2943
2944 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
2945 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
2946 MLX4_CMD_NATIVE);
2947 return err;
2948 }
2949
2950 enum {
2951 BUSY_MAX_RETRIES = 10
2952 };
2953
2954 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2955 struct mlx4_vhcr *vhcr,
2956 struct mlx4_cmd_mailbox *inbox,
2957 struct mlx4_cmd_mailbox *outbox,
2958 struct mlx4_cmd_info *cmd)
2959 {
2960 int err;
2961 int index = vhcr->in_modifier & 0xffff;
2962
2963 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2964 if (err)
2965 return err;
2966
2967 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2968 put_res(dev, slave, index, RES_COUNTER);
2969 return err;
2970 }
2971
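/* Detach every multicast group still attached to a dying slave's QP. */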
2972 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2973 {
2974 struct res_gid *rgid;
2975 struct res_gid *tmp;
2976 struct mlx4_qp qp; /* dummy for calling attach/detach */
2977
2978 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2979 qp.qpn = rqp->local_qpn;
2980 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2981 rgid->steer);
2982 list_del(&rgid->list);
2983 kfree(rgid);
2984 }
2985 }
2986
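/*
 * Mark every resource of @type owned by @slave as busy/removing so no new
 * references can be taken while it is being torn down.  Returns the number
 * of resources that could not be claimed because they were already busy.
 */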
2987 static int _move_all_busy(struct mlx4_dev *dev, int slave,
2988 enum mlx4_resource type, int print)
2989 {
2990 struct mlx4_priv *priv = mlx4_priv(dev);
2991 struct mlx4_resource_tracker *tracker =
2992 &priv->mfunc.master.res_tracker;
2993 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2994 struct res_common *r;
2995 struct res_common *tmp;
2996 int busy;
2997
2998 busy = 0;
2999 spin_lock_irq(mlx4_tlock(dev));
3000 list_for_each_entry_safe(r, tmp, rlist, list) {
3001 if (r->owner == slave) {
3002 if (!r->removing) {
3003 if (r->state == RES_ANY_BUSY) {
3004 if (print)
3005 mlx4_dbg(dev,
3006 "%s id 0x%llx is busy\n",
3007 ResourceType(type),
3008 r->res_id);
3009 ++busy;
3010 } else {
3011 r->from_state = r->state;
3012 r->state = RES_ANY_BUSY;
3013 r->removing = 1;
3014 }
3015 }
3016 }
3017 }
3018 spin_unlock_irq(mlx4_tlock(dev));
3019
3020 return busy;
3021 }
3022
3023 static int move_all_busy(struct mlx4_dev *dev, int slave,
3024 enum mlx4_resource type)
3025 {
3026 unsigned long begin;
3027 int busy;
3028
3029 begin = jiffies;
3030 do {
3031 busy = _move_all_busy(dev, slave, type, 0);
3032 if (time_after(jiffies, begin + 5 * HZ))
3033 break;
3034 if (busy)
3035 cond_resched();
3036 } while (busy);
3037
3038 if (busy)
3039 busy = _move_all_busy(dev, slave, type, 1);
3040
3041 return busy;
3042 }
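
/*
 * The rem_slave_*() helpers below walk a dying slave's resource list for one
 * type, undo any HW state with the matching firmware command issued by the
 * master, and free the tracker entries.
 */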
3043 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3044 {
3045 struct mlx4_priv *priv = mlx4_priv(dev);
3046 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3047 struct list_head *qp_list =
3048 &tracker->slave_list[slave].res_list[RES_QP];
3049 struct res_qp *qp;
3050 struct res_qp *tmp;
3051 int state;
3052 u64 in_param;
3053 int qpn;
3054 int err;
3055
3056 err = move_all_busy(dev, slave, RES_QP);
3057 if (err)
3058 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3059 "for slave %d\n", slave);
3060
3061 spin_lock_irq(mlx4_tlock(dev));
3062 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3063 spin_unlock_irq(mlx4_tlock(dev));
3064 if (qp->com.owner == slave) {
3065 qpn = qp->com.res_id;
3066 detach_qp(dev, slave, qp);
3067 state = qp->com.from_state;
3068 while (state != 0) {
3069 switch (state) {
3070 case RES_QP_RESERVED:
3071 spin_lock_irq(mlx4_tlock(dev));
3072 rb_erase(&qp->com.node,
3073 &tracker->res_tree[RES_QP]);
3074 list_del(&qp->com.list);
3075 spin_unlock_irq(mlx4_tlock(dev));
3076 kfree(qp);
3077 state = 0;
3078 break;
3079 case RES_QP_MAPPED:
3080 if (!valid_reserved(dev, slave, qpn))
3081 __mlx4_qp_free_icm(dev, qpn);
3082 state = RES_QP_RESERVED;
3083 break;
3084 case RES_QP_HW:
3085 in_param = slave;
3086 err = mlx4_cmd(dev, in_param,
3087 qp->local_qpn, 2,
3088 MLX4_CMD_2RST_QP,
3089 MLX4_CMD_TIME_CLASS_A,
3090 MLX4_CMD_NATIVE);
3091 if (err)
3092 mlx4_dbg(dev, "rem_slave_qps: failed"
3093 " to move slave %d qpn %d to"
3094 " reset\n", slave,
3095 qp->local_qpn);
3096 atomic_dec(&qp->rcq->ref_count);
3097 atomic_dec(&qp->scq->ref_count);
3098 atomic_dec(&qp->mtt->ref_count);
3099 if (qp->srq)
3100 atomic_dec(&qp->srq->ref_count);
3101 state = RES_QP_MAPPED;
3102 break;
3103 default:
3104 state = 0;
3105 }
3106 }
3107 }
3108 spin_lock_irq(mlx4_tlock(dev));
3109 }
3110 spin_unlock_irq(mlx4_tlock(dev));
3111 }
3112
3113 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3114 {
3115 struct mlx4_priv *priv = mlx4_priv(dev);
3116 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3117 struct list_head *srq_list =
3118 &tracker->slave_list[slave].res_list[RES_SRQ];
3119 struct res_srq *srq;
3120 struct res_srq *tmp;
3121 int state;
3122 u64 in_param;
3123 LIST_HEAD(tlist);
3124 int srqn;
3125 int err;
3126
3127 err = move_all_busy(dev, slave, RES_SRQ);
3128 if (err)
3129 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3130 "busy for slave %d\n", slave);
3131
3132 spin_lock_irq(mlx4_tlock(dev));
3133 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3134 spin_unlock_irq(mlx4_tlock(dev));
3135 if (srq->com.owner == slave) {
3136 srqn = srq->com.res_id;
3137 state = srq->com.from_state;
3138 while (state != 0) {
3139 switch (state) {
3140 case RES_SRQ_ALLOCATED:
3141 __mlx4_srq_free_icm(dev, srqn);
3142 spin_lock_irq(mlx4_tlock(dev));
3143 rb_erase(&srq->com.node,
3144 &tracker->res_tree[RES_SRQ]);
3145 list_del(&srq->com.list);
3146 spin_unlock_irq(mlx4_tlock(dev));
3147 kfree(srq);
3148 state = 0;
3149 break;
3150
3151 case RES_SRQ_HW:
3152 in_param = slave;
3153 err = mlx4_cmd(dev, in_param, srqn, 1,
3154 MLX4_CMD_HW2SW_SRQ,
3155 MLX4_CMD_TIME_CLASS_A,
3156 MLX4_CMD_NATIVE);
3157 if (err)
3158 mlx4_dbg(dev, "rem_slave_srqs: failed"
3159 " to move slave %d srq %d to"
3160 " SW ownership\n",
3161 slave, srqn);
3162
3163 atomic_dec(&srq->mtt->ref_count);
3164 if (srq->cq)
3165 atomic_dec(&srq->cq->ref_count);
3166 state = RES_SRQ_ALLOCATED;
3167 break;
3168
3169 default:
3170 state = 0;
3171 }
3172 }
3173 }
3174 spin_lock_irq(mlx4_tlock(dev));
3175 }
3176 spin_unlock_irq(mlx4_tlock(dev));
3177 }
3178
3179 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3180 {
3181 struct mlx4_priv *priv = mlx4_priv(dev);
3182 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3183 struct list_head *cq_list =
3184 &tracker->slave_list[slave].res_list[RES_CQ];
3185 struct res_cq *cq;
3186 struct res_cq *tmp;
3187 int state;
3188 u64 in_param;
3189 LIST_HEAD(tlist);
3190 int cqn;
3191 int err;
3192
3193 err = move_all_busy(dev, slave, RES_CQ);
3194 if (err)
3195 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3196 "busy for slave %d\n", slave);
3197
3198 spin_lock_irq(mlx4_tlock(dev));
3199 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3200 spin_unlock_irq(mlx4_tlock(dev));
3201 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3202 cqn = cq->com.res_id;
3203 state = cq->com.from_state;
3204 while (state != 0) {
3205 switch (state) {
3206 case RES_CQ_ALLOCATED:
3207 __mlx4_cq_free_icm(dev, cqn);
3208 spin_lock_irq(mlx4_tlock(dev));
3209 rb_erase(&cq->com.node,
3210 &tracker->res_tree[RES_CQ]);
3211 list_del(&cq->com.list);
3212 spin_unlock_irq(mlx4_tlock(dev));
3213 kfree(cq);
3214 state = 0;
3215 break;
3216
3217 case RES_CQ_HW:
3218 in_param = slave;
3219 err = mlx4_cmd(dev, in_param, cqn, 1,
3220 MLX4_CMD_HW2SW_CQ,
3221 MLX4_CMD_TIME_CLASS_A,
3222 MLX4_CMD_NATIVE);
3223 if (err)
3224 mlx4_dbg(dev, "rem_slave_cqs: failed"
3225 " to move slave %d cq %d to"
3226 " SW ownership\n",
3227 slave, cqn);
3228 atomic_dec(&cq->mtt->ref_count);
3229 state = RES_CQ_ALLOCATED;
3230 break;
3231
3232 default:
3233 state = 0;
3234 }
3235 }
3236 }
3237 spin_lock_irq(mlx4_tlock(dev));
3238 }
3239 spin_unlock_irq(mlx4_tlock(dev));
3240 }
3241
3242 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3243 {
3244 struct mlx4_priv *priv = mlx4_priv(dev);
3245 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3246 struct list_head *mpt_list =
3247 &tracker->slave_list[slave].res_list[RES_MPT];
3248 struct res_mpt *mpt;
3249 struct res_mpt *tmp;
3250 int state;
3251 u64 in_param;
3252 LIST_HEAD(tlist);
3253 int mptn;
3254 int err;
3255
3256 err = move_all_busy(dev, slave, RES_MPT);
3257 if (err)
3258 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3259 "busy for slave %d\n", slave);
3260
3261 spin_lock_irq(mlx4_tlock(dev));
3262 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3263 spin_unlock_irq(mlx4_tlock(dev));
3264 if (mpt->com.owner == slave) {
3265 mptn = mpt->com.res_id;
3266 state = mpt->com.from_state;
3267 while (state != 0) {
3268 switch (state) {
3269 case RES_MPT_RESERVED:
3270 __mlx4_mr_release(dev, mpt->key);
3271 spin_lock_irq(mlx4_tlock(dev));
3272 rb_erase(&mpt->com.node,
3273 &tracker->res_tree[RES_MPT]);
3274 list_del(&mpt->com.list);
3275 spin_unlock_irq(mlx4_tlock(dev));
3276 kfree(mpt);
3277 state = 0;
3278 break;
3279
3280 case RES_MPT_MAPPED:
3281 __mlx4_mr_free_icm(dev, mpt->key);
3282 state = RES_MPT_RESERVED;
3283 break;
3284
3285 case RES_MPT_HW:
3286 in_param = slave;
3287 err = mlx4_cmd(dev, in_param, mptn, 0,
3288 MLX4_CMD_HW2SW_MPT,
3289 MLX4_CMD_TIME_CLASS_A,
3290 MLX4_CMD_NATIVE);
3291 if (err)
3292 mlx4_dbg(dev, "rem_slave_mrs: failed"
3293 " to move slave %d mpt %d to"
3294 " SW ownership\n",
3295 slave, mptn);
3296 if (mpt->mtt)
3297 atomic_dec(&mpt->mtt->ref_count);
3298 state = RES_MPT_MAPPED;
3299 break;
3300 default:
3301 state = 0;
3302 }
3303 }
3304 }
3305 spin_lock_irq(mlx4_tlock(dev));
3306 }
3307 spin_unlock_irq(mlx4_tlock(dev));
3308 }
3309
3310 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3311 {
3312 struct mlx4_priv *priv = mlx4_priv(dev);
3313 struct mlx4_resource_tracker *tracker =
3314 &priv->mfunc.master.res_tracker;
3315 struct list_head *mtt_list =
3316 &tracker->slave_list[slave].res_list[RES_MTT];
3317 struct res_mtt *mtt;
3318 struct res_mtt *tmp;
3319 int state;
3320 LIST_HEAD(tlist);
3321 int base;
3322 int err;
3323
3324 err = move_all_busy(dev, slave, RES_MTT);
3325 if (err)
3326 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3327 "busy for slave %d\n", slave);
3328
3329 spin_lock_irq(mlx4_tlock(dev));
3330 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3331 spin_unlock_irq(mlx4_tlock(dev));
3332 if (mtt->com.owner == slave) {
3333 base = mtt->com.res_id;
3334 state = mtt->com.from_state;
3335 while (state != 0) {
3336 switch (state) {
3337 case RES_MTT_ALLOCATED:
3338 __mlx4_free_mtt_range(dev, base,
3339 mtt->order);
3340 spin_lock_irq(mlx4_tlock(dev));
3341 rb_erase(&mtt->com.node,
3342 &tracker->res_tree[RES_MTT]);
3343 list_del(&mtt->com.list);
3344 spin_unlock_irq(mlx4_tlock(dev));
3345 kfree(mtt);
3346 state = 0;
3347 break;
3348
3349 default:
3350 state = 0;
3351 }
3352 }
3353 }
3354 spin_lock_irq(mlx4_tlock(dev));
3355 }
3356 spin_unlock_irq(mlx4_tlock(dev));
3357 }
3358
3359 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3360 {
3361 struct mlx4_priv *priv = mlx4_priv(dev);
3362 struct mlx4_resource_tracker *tracker =
3363 &priv->mfunc.master.res_tracker;
3364 struct list_head *fs_rule_list =
3365 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3366 struct res_fs_rule *fs_rule;
3367 struct res_fs_rule *tmp;
3368 int state;
3369 u64 base;
3370 int err;
3371
3372 err = move_all_busy(dev, slave, RES_FS_RULE);
3373 if (err)
3374 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3375 slave);
3376
3377 spin_lock_irq(mlx4_tlock(dev));
3378 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3379 spin_unlock_irq(mlx4_tlock(dev));
3380 if (fs_rule->com.owner == slave) {
3381 base = fs_rule->com.res_id;
3382 state = fs_rule->com.from_state;
3383 while (state != 0) {
3384 switch (state) {
3385 case RES_FS_RULE_ALLOCATED:
3386 /* detach rule */
3387 err = mlx4_cmd(dev, base, 0, 0,
3388 MLX4_QP_FLOW_STEERING_DETACH,
3389 MLX4_CMD_TIME_CLASS_A,
3390 MLX4_CMD_NATIVE);
3391
3392 spin_lock_irq(mlx4_tlock(dev));
3393 rb_erase(&fs_rule->com.node,
3394 &tracker->res_tree[RES_FS_RULE]);
3395 list_del(&fs_rule->com.list);
3396 spin_unlock_irq(mlx4_tlock(dev));
3397 kfree(fs_rule);
3398 state = 0;
3399 break;
3400
3401 default:
3402 state = 0;
3403 }
3404 }
3405 }
3406 spin_lock_irq(mlx4_tlock(dev));
3407 }
3408 spin_unlock_irq(mlx4_tlock(dev));
3409 }
3410
3411 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3412 {
3413 struct mlx4_priv *priv = mlx4_priv(dev);
3414 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3415 struct list_head *eq_list =
3416 &tracker->slave_list[slave].res_list[RES_EQ];
3417 struct res_eq *eq;
3418 struct res_eq *tmp;
3419 int err;
3420 int state;
3421 LIST_HEAD(tlist);
3422 int eqn;
3423 struct mlx4_cmd_mailbox *mailbox;
3424
3425 err = move_all_busy(dev, slave, RES_EQ);
3426 if (err)
3427 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3428 "busy for slave %d\n", slave);
3429
3430 spin_lock_irq(mlx4_tlock(dev));
3431 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3432 spin_unlock_irq(mlx4_tlock(dev));
3433 if (eq->com.owner == slave) {
3434 eqn = eq->com.res_id;
3435 state = eq->com.from_state;
3436 while (state != 0) {
3437 switch (state) {
3438 case RES_EQ_RESERVED:
3439 spin_lock_irq(mlx4_tlock(dev));
3440 rb_erase(&eq->com.node,
3441 &tracker->res_tree[RES_EQ]);
3442 list_del(&eq->com.list);
3443 spin_unlock_irq(mlx4_tlock(dev));
3444 kfree(eq);
3445 state = 0;
3446 break;
3447
3448 case RES_EQ_HW:
3449 mailbox = mlx4_alloc_cmd_mailbox(dev);
3450 if (IS_ERR(mailbox)) {
3451 cond_resched();
3452 continue;
3453 }
3454 err = mlx4_cmd_box(dev, slave, 0,
3455 eqn & 0xff, 0,
3456 MLX4_CMD_HW2SW_EQ,
3457 MLX4_CMD_TIME_CLASS_A,
3458 MLX4_CMD_NATIVE);
3459 if (err)
3460 mlx4_dbg(dev, "rem_slave_eqs: failed"
3461 " to move slave %d eqs %d to"
3462 " SW ownership\n", slave, eqn);
3463 mlx4_free_cmd_mailbox(dev, mailbox);
3464 atomic_dec(&eq->mtt->ref_count);
3465 state = RES_EQ_RESERVED;
3466 break;
3467
3468 default:
3469 state = 0;
3470 }
3471 }
3472 }
3473 spin_lock_irq(mlx4_tlock(dev));
3474 }
3475 spin_unlock_irq(mlx4_tlock(dev));
3476 }
3477
3478 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3479 {
3480 struct mlx4_priv *priv = mlx4_priv(dev);
3481 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3482 struct list_head *counter_list =
3483 &tracker->slave_list[slave].res_list[RES_COUNTER];
3484 struct res_counter *counter;
3485 struct res_counter *tmp;
3486 int err;
3487 int index;
3488
3489 err = move_all_busy(dev, slave, RES_COUNTER);
3490 if (err)
3491 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3492 "busy for slave %d\n", slave);
3493
3494 spin_lock_irq(mlx4_tlock(dev));
3495 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3496 if (counter->com.owner == slave) {
3497 index = counter->com.res_id;
3498 rb_erase(&counter->com.node,
3499 &tracker->res_tree[RES_COUNTER]);
3500 list_del(&counter->com.list);
3501 kfree(counter);
3502 __mlx4_counter_free(dev, index);
3503 }
3504 }
3505 spin_unlock_irq(mlx4_tlock(dev));
3506 }
3507
3508 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3509 {
3510 struct mlx4_priv *priv = mlx4_priv(dev);
3511 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3512 struct list_head *xrcdn_list =
3513 &tracker->slave_list[slave].res_list[RES_XRCD];
3514 struct res_xrcdn *xrcd;
3515 struct res_xrcdn *tmp;
3516 int err;
3517 int xrcdn;
3518
3519 err = move_all_busy(dev, slave, RES_XRCD);
3520 if (err)
3521 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3522 "busy for slave %d\n", slave);
3523
3524 spin_lock_irq(mlx4_tlock(dev));
3525 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3526 if (xrcd->com.owner == slave) {
3527 xrcdn = xrcd->com.res_id;
3528 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3529 list_del(&xrcd->com.list);
3530 kfree(xrcd);
3531 __mlx4_xrcd_free(dev, xrcdn);
3532 }
3533 }
3534 spin_unlock_irq(mlx4_tlock(dev));
3535 }
3536
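/*
 * Release everything a slave still owns, in dependency order (QPs before
 * CQs/SRQs, MRs before MTTs), under the slave's resource-tracker mutex.
 */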
3537 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3538 {
3539 struct mlx4_priv *priv = mlx4_priv(dev);
3540
3541 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3542 /* VLAN resources are not tracked here yet; only MAC resources are removed */
3543 rem_slave_macs(dev, slave);
3544 rem_slave_qps(dev, slave);
3545 rem_slave_srqs(dev, slave);
3546 rem_slave_cqs(dev, slave);
3547 rem_slave_mrs(dev, slave);
3548 rem_slave_eqs(dev, slave);
3549 rem_slave_mtts(dev, slave);
3550 rem_slave_counters(dev, slave);
3551 rem_slave_xrcdns(dev, slave);
3552 rem_slave_fs_rule(dev, slave);
3553 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3554 }