/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID			(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1
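
/*
 * The resource tracker lets the PF (master) account for every HW resource
 * owned by each slave (QPs, CQs, SRQs, MPTs, MTTs, MACs, VLANs, EQs,
 * counters, XRC domains and flow steering rules), so that per-function
 * quotas can be enforced and everything a dead or misbehaving function
 * left behind can be reclaimed.  Each tracked object embeds the
 * struct res_common defined below.
 */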
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
	const char		*func_name;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	void			*mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};
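
/*
 * Tracked resources live in one red-black tree per resource type, keyed
 * by res_id; the two helpers below implement lookup and insertion.
 */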
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = rb_entry(node, struct res_common,
						  node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = rb_entry(*new, struct res_common,
						   node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
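
/*
 * Accounting model: per resource type, every function has a hard "quota"
 * and a smaller "guaranteed" share.  Requests within the guaranteed share
 * are charged to the reserved pool; anything beyond it must fit in the
 * shared free pool without dipping into what is still reserved for other
 * functions' guarantees.  MACs and VLANs are accounted per port
 * (port > 0), all other types globally.
 */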
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
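
/*
 * Every function is guaranteed 1/(2 * (num_vfs + 1)) of the instances of
 * a resource type, and may allocate at most half of the total plus its
 * guarantee.  For example, with 64K QPs and 7 VFs each function gets
 * guaranteed = 64K / 16 = 4K and quota = 32K + 4K = 36K.
 */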
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* the master so it can manage quotas */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				res_alloc->res_free -= res_alloc->guaranteed[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
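
/*
 * update_pkey_index() paravirtualizes the pkey index a slave placed in a
 * QP context mailbox: byte 35 of the mailbox is replaced with the
 * physical index taken from priv->virt2phys_pkey.  update_gid() below
 * does the analogous rewrite for the GID index, offsetting it by the
 * slave's base GID on Ethernet ports.
 */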
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
*dev
, struct mlx4_cmd_mailbox
*inbox
,
698 struct mlx4_qp_context
*qp_ctx
= inbox
->buf
+ 8;
699 enum mlx4_qp_optpar optpar
= be32_to_cpu(*(__be32
*) inbox
->buf
);
700 u32 ts
= (be32_to_cpu(qp_ctx
->flags
) >> 16) & 0xff;
703 if (MLX4_QP_ST_UD
== ts
) {
704 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
705 if (mlx4_is_eth(dev
, port
))
706 qp_ctx
->pri_path
.mgid_index
=
707 mlx4_get_base_gid_ix(dev
, slave
, port
) | 0x80;
709 qp_ctx
->pri_path
.mgid_index
= slave
| 0x80;
711 } else if (MLX4_QP_ST_RC
== ts
|| MLX4_QP_ST_XRC
== ts
|| MLX4_QP_ST_UC
== ts
) {
712 if (optpar
& MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
) {
713 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
714 if (mlx4_is_eth(dev
, port
)) {
715 qp_ctx
->pri_path
.mgid_index
+=
716 mlx4_get_base_gid_ix(dev
, slave
, port
);
717 qp_ctx
->pri_path
.mgid_index
&= 0x7f;
719 qp_ctx
->pri_path
.mgid_index
= slave
& 0x7F;
722 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
723 port
= (qp_ctx
->alt_path
.sched_queue
>> 6 & 1) + 1;
724 if (mlx4_is_eth(dev
, port
)) {
725 qp_ctx
->alt_path
.mgid_index
+=
726 mlx4_get_base_gid_ix(dev
, slave
, port
);
727 qp_ctx
->alt_path
.mgid_index
&= 0x7f;
729 qp_ctx
->alt_path
.mgid_index
= slave
& 0x7F;
735 static int handle_counter(struct mlx4_dev
*dev
, struct mlx4_qp_context
*qpc
,
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw see it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	case RES_QP: return "QP";
	case RES_CQ: return "CQ";
	case RES_SRQ: return "SRQ";
	case RES_XRCD: return "XRCD";
	case RES_MPT: return "MPT";
	case RES_MTT: return "MTT";
	case RES_MAC: return "MAC";
	case RES_VLAN: return "VLAN";
	case RES_COUNTER: return "COUNTER";
	case RES_FS_RULE: return "FS_RULE";
	case RES_EQ: return "EQ";
	default: return "INVALID RESOURCE";
	}
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
*dev
, int slave
, u64 res_id
,
881 enum mlx4_resource type
,
882 void *res
, const char *func_name
)
884 struct res_common
*r
;
887 spin_lock_irq(mlx4_tlock(dev
));
888 r
= find_res(dev
, res_id
, type
);
894 if (r
->state
== RES_ANY_BUSY
) {
896 "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
897 func_name
, slave
, res_id
, mlx4_resource_type_to_str(type
),
903 if (r
->owner
!= slave
) {
908 r
->from_state
= r
->state
;
909 r
->state
= RES_ANY_BUSY
;
910 r
->func_name
= func_name
;
913 *((struct res_common
**)res
) = r
;
916 spin_unlock_irq(mlx4_tlock(dev
));
920 #define get_res(dev, slave, res_id, type, res) \
921 _get_res((dev), (slave), (res_id), (type), (res), __func__)
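
/*
 * _get_res()/put_res() form a busy-lock around a tracked resource: taking
 * it saves the current state, parks the resource in RES_ANY_BUSY and
 * records the caller for diagnostics; put_res() restores the saved state.
 * Typical usage:
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	...
 *	put_res(dev, slave, id, RES_MPT);
 */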
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index  = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}
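
/*
 * The alloc_*_tr() helpers below build the tracker entry for each
 * resource type, setting its initial state (RES_QP_RESERVED,
 * RES_MTT_ALLOCATED and so on) before add_res_range() links it into the
 * rb-tree and the owning slave's list.
 */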
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}
*dev
, int slave
, u64 base
, int count
,
1268 enum mlx4_resource type
, int extra
)
1272 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1273 struct res_common
**res_arr
;
1274 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1275 struct rb_root
*root
= &tracker
->res_tree
[type
];
1277 res_arr
= kzalloc(count
* sizeof *res_arr
, GFP_KERNEL
);
1281 for (i
= 0; i
< count
; ++i
) {
1282 res_arr
[i
] = alloc_tr(base
+ i
, type
, slave
, extra
);
1284 for (--i
; i
>= 0; --i
)
1292 spin_lock_irq(mlx4_tlock(dev
));
1293 for (i
= 0; i
< count
; ++i
) {
1294 if (find_res(dev
, base
+ i
, type
)) {
1298 err
= res_tracker_insert(root
, res_arr
[i
]);
1301 list_add_tail(&res_arr
[i
]->list
,
1302 &tracker
->slave_list
[slave
].res_list
[type
]);
1304 spin_unlock_irq(mlx4_tlock(dev
));
1310 for (--i
; i
>= 0; --i
) {
1311 rb_erase(&res_arr
[i
]->node
, root
);
1312 list_del_init(&res_arr
[i
]->list
);
1315 spin_unlock_irq(mlx4_tlock(dev
));
1317 for (i
= 0; i
< count
; ++i
)
1325 static int remove_qp_ok(struct res_qp
*res
)
1327 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1328 !list_empty(&res
->mcg_list
)) {
1329 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1330 res
->com
.state
, atomic_read(&res
->ref_count
));
1332 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1339 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1341 if (res
->com
.state
== RES_MTT_BUSY
||
1342 atomic_read(&res
->ref_count
)) {
1343 pr_devel("%s-%d: state %s, ref_count %d\n",
1345 mtt_states_str(res
->com
.state
),
1346 atomic_read(&res
->ref_count
));
1348 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1350 else if (res
->order
!= order
)
1356 static int remove_mpt_ok(struct res_mpt
*res
)
1358 if (res
->com
.state
== RES_MPT_BUSY
)
1360 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1366 static int remove_eq_ok(struct res_eq
*res
)
1368 if (res
->com
.state
== RES_MPT_BUSY
)
1370 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1376 static int remove_counter_ok(struct res_counter
*res
)
1378 if (res
->com
.state
== RES_COUNTER_BUSY
)
1380 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1386 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1388 if (res
->com
.state
== RES_XRCD_BUSY
)
1390 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1396 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1398 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1400 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1406 static int remove_cq_ok(struct res_cq
*res
)
1408 if (res
->com
.state
== RES_CQ_BUSY
)
1410 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1416 static int remove_srq_ok(struct res_srq
*res
)
1418 if (res
->com
.state
== RES_SRQ_BUSY
)
1420 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1426 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1430 return remove_qp_ok((struct res_qp
*)res
);
1432 return remove_cq_ok((struct res_cq
*)res
);
1434 return remove_srq_ok((struct res_srq
*)res
);
1436 return remove_mpt_ok((struct res_mpt
*)res
);
1438 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1442 return remove_eq_ok((struct res_eq
*)res
);
1444 return remove_counter_ok((struct res_counter
*)res
);
1446 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1448 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
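
/*
 * State changes follow a start/commit/abort protocol: a
 * *_res_start_move_to() helper validates the requested transition, saves
 * the current state and parks the resource in its BUSY state; the caller
 * then issues the FW command and finishes with res_end_move() on success
 * or res_abort_move() on failure.
 */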
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	if (!err && eq)
		*eq = r;

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
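
/*
 * The *_alloc_res() handlers below service ALLOC_RES commands from
 * slaves.  They share one pattern: charge the quota via
 * mlx4_grant_resource(), do the real allocation with the __mlx4_*
 * helper, register the result with add_res_range(), and unwind both on
 * failure, e.g.:
 *
 *	err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
 *	if (err)
 *		break;
 *	err = __mlx4_cq_alloc_icm(dev, &cqn);
 *	if (err) {
 *		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
 *		break;
 *	}
 */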
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
*dev
, int slave
, int op
, int cmd
,
2085 u64 in_param
, u64
*out_param
, int in_port
)
2092 if (op
!= RES_OP_RESERVE_AND_MAP
)
2095 port
= !in_port
? get_param_l(out_param
) : in_port
;
2096 port
= mlx4_slave_convert_port(
2103 err
= __mlx4_register_mac(dev
, port
, mac
);
2106 set_param_l(out_param
, err
);
2111 err
= mac_add_to_slave(dev
, slave
, mac
, port
, smac_index
);
2113 __mlx4_unregister_mac(dev
, port
, mac
);
2118 static int vlan_add_to_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2119 int port
, int vlan_index
)
2121 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2122 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2123 struct list_head
*vlan_list
=
2124 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2125 struct vlan_res
*res
, *tmp
;
2127 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2128 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2129 /* vlan found. update ref count */
2135 if (mlx4_grant_resource(dev
, slave
, RES_VLAN
, 1, port
))
2137 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
2139 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, port
);
2143 res
->port
= (u8
) port
;
2144 res
->vlan_index
= vlan_index
;
2146 list_add_tail(&res
->list
,
2147 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
]);
2152 static void vlan_del_from_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2155 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2156 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2157 struct list_head
*vlan_list
=
2158 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2159 struct vlan_res
*res
, *tmp
;
2161 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2162 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2163 if (!--res
->ref_count
) {
2164 list_del(&res
->list
);
2165 mlx4_release_resource(dev
, slave
, RES_VLAN
,
2174 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
)
2176 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2177 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2178 struct list_head
*vlan_list
=
2179 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2180 struct vlan_res
*res
, *tmp
;
2183 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2184 list_del(&res
->list
);
2185 /* dereference the vlan the num times the slave referenced it */
2186 for (i
= 0; i
< res
->ref_count
; i
++)
2187 __mlx4_unregister_vlan(dev
, res
->port
, res
->vlan
);
2188 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, res
->port
);
2193 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2194 u64 in_param
, u64
*out_param
, int in_port
)
2196 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2197 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2203 port
= !in_port
? get_param_l(out_param
) : in_port
;
2205 if (!port
|| op
!= RES_OP_RESERVE_AND_MAP
)
2208 port
= mlx4_slave_convert_port(
2213 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2214 if (!in_port
&& port
> 0 && port
<= dev
->caps
.num_ports
) {
2215 slave_state
[slave
].old_vlan_api
= true;
2219 vlan
= (u16
) in_param
;
2221 err
= __mlx4_register_vlan(dev
, port
, vlan
, &vlan_index
);
2223 set_param_l(out_param
, (u32
) vlan_index
);
2224 err
= vlan_add_to_slave(dev
, slave
, vlan
, port
, vlan_index
);
2226 __mlx4_unregister_vlan(dev
, port
, vlan
);
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
*dev
, int slave
, int op
, int cmd
,
2357 case RES_OP_RESERVE
:
2358 base
= get_param_l(&in_param
) & 0x7fffff;
2359 count
= get_param_h(&in_param
);
2360 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
2363 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
2364 __mlx4_qp_release_range(dev
, base
, count
);
2366 case RES_OP_MAP_ICM
:
2367 qpn
= get_param_l(&in_param
) & 0x7fffff;
2368 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
2373 if (!fw_reserved(dev
, qpn
))
2374 __mlx4_qp_free_icm(dev
, qpn
);
2376 res_end_move(dev
, slave
, RES_QP
, qpn
);
2378 if (valid_reserved(dev
, slave
, qpn
))
2379 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
2388 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2389 u64 in_param
, u64
*out_param
)
2395 if (op
!= RES_OP_RESERVE_AND_MAP
)
2398 base
= get_param_l(&in_param
);
2399 order
= get_param_h(&in_param
);
2400 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
2402 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
2403 __mlx4_free_mtt_range(dev
, base
, order
);
2408 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2414 struct res_mpt
*mpt
;
2417 case RES_OP_RESERVE
:
2418 index
= get_param_l(&in_param
);
2419 id
= index
& mpt_mask(dev
);
2420 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2424 put_res(dev
, slave
, id
, RES_MPT
);
2426 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
2429 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
2430 __mlx4_mpt_release(dev
, index
);
2432 case RES_OP_MAP_ICM
:
2433 index
= get_param_l(&in_param
);
2434 id
= index
& mpt_mask(dev
);
2435 err
= mr_res_start_move_to(dev
, slave
, id
,
2436 RES_MPT_RESERVED
, &mpt
);
2440 __mlx4_mpt_free_icm(dev
, mpt
->key
);
2441 res_end_move(dev
, slave
, RES_MPT
, id
);
2450 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2451 u64 in_param
, u64
*out_param
)
2457 case RES_OP_RESERVE_AND_MAP
:
2458 cqn
= get_param_l(&in_param
);
2459 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2463 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2464 __mlx4_cq_free_icm(dev
, cqn
);
2475 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2476 u64 in_param
, u64
*out_param
)
2482 case RES_OP_RESERVE_AND_MAP
:
2483 srqn
= get_param_l(&in_param
);
2484 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2488 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2489 __mlx4_srq_free_icm(dev
, srqn
);
2500 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2501 u64 in_param
, u64
*out_param
, int in_port
)
2507 case RES_OP_RESERVE_AND_MAP
:
2508 port
= !in_port
? get_param_l(out_param
) : in_port
;
2509 port
= mlx4_slave_convert_port(
2514 mac_del_from_slave(dev
, slave
, in_param
, port
);
2515 __mlx4_unregister_mac(dev
, port
, in_param
);
2526 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2527 u64 in_param
, u64
*out_param
, int port
)
2529 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2530 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2533 port
= mlx4_slave_convert_port(
2539 case RES_OP_RESERVE_AND_MAP
:
2540 if (slave_state
[slave
].old_vlan_api
)
2544 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2545 __mlx4_unregister_vlan(dev
, port
, in_param
);
2555 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2556 u64 in_param
, u64
*out_param
)
2561 if (op
!= RES_OP_RESERVE
)
2564 index
= get_param_l(&in_param
);
2565 if (index
== MLX4_SINK_COUNTER_INDEX(dev
))
2568 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2572 __mlx4_counter_free(dev
, index
);
2573 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2578 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2579 u64 in_param
, u64
*out_param
)
2584 if (op
!= RES_OP_RESERVE
)
2587 xrcdn
= get_param_l(&in_param
);
2588 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2592 __mlx4_xrcd_free(dev
, xrcdn
);
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
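
/*
 * qp_get_mtt_size() recomputes how many MTT pages a QP context implies:
 * the SQ takes 2^(log_sq_size + log_sq_stride + 4) bytes, the RQ takes
 * the analogous amount unless the QP uses an SRQ, is an RSS parent or an
 * XRC target (then it contributes nothing), and the sum, adjusted by the
 * 64-byte-granular page_offset, is rounded up to a power of two in units
 * of 2^page_shift.
 */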
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
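
/* A slave may only program MTT entries inside an MTT range it owns:
 * [start, start + size) must fall within the tracked reservation, whose
 * length is 2^order entries starting at com.res_id.
 */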
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
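
/* The SW2HW/HW2SW command wrappers below follow a common pattern: move
 * the resource into the target tracker state (res_*_start_move_to), take
 * any referenced resources busy (get_res), forward the command to
 * firmware via mlx4_DMA_wrapper, then commit with res_end_move or roll
 * back with res_abort_move on failure.
 */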
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt = NULL;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);
		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);
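
/* RST2INIT pins everything the QP context points at - the MTT range, the
 * receive and send CQs and optionally an SRQ - by bumping their
 * ref_counts, so none of them can be freed behind the QP's back until
 * 2RST drops the references again.
 */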
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}

	/* Save param3 for dynamic changes from VST back to VGT */
	qp->param3 = qpc->param3;
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
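
/* EQEs and CQEs are 32 bytes, hence the "+ 5" when converting a log2
 * entry count to a byte size, e.g. log_cq_size = 10 with a 4KB page
 * (page_shift = 12) needs 1 << (10 + 5 - 12) = 8 MTT pages.
 */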
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	u8 get = vhcr->op_modifier;

	if (get != 1)
		return -EPERM;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	u32 qpn;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx  = inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	if (slave != mlx4_master_func_num(dev)) {
		qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		/* setting QP rate-limit is disallowed for VFs */
		if (qp_ctx->rate_limit_params)
			return -EPERM;
	}

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;

	case MLX4_QP_ST_MLX:
		qpn = vhcr->in_modifier & 0x7fffff;
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (transition == QP_TRANS_INIT2RTR &&
		    slave != mlx4_master_func_num(dev) &&
		    mlx4_is_qp_reserved(dev, qpn) &&
		    !mlx4_vf_smi_enabled(dev, slave, port)) {
			/* only enabled VFs may create MLX proxy QPs */
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
			return -EPERM;
		}
		break;

	default:
		break;
	}

	return 0;
}
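
/* WRITE_MTT mailbox layout: page_list[0] holds the starting MTT index
 * and the DMA addresses to program follow from page_list[2] onwards;
 * bit 0 of each address is the MTT "present" flag and is masked off
 * before handing the list to the software write path, which sets it
 * again itself.
 */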
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
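
/* Inject an event into a slave's paravirtualized event queue: the 28-byte
 * EQE body is copied into a mailbox and posted with GEN_EQE, whose
 * in_modifier packs the slave number in the low byte and the slave's
 * registered EQN in bits 16-25.
 */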
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	/* check for slave valid, slave not PF, and slave active */
	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return 0;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 10) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq = NULL;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq = NULL;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq = NULL;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq = NULL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
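
/* Bit 6 of sched_queue selects the physical port.  Single-port VFs always
 * see port 1, so the port bit coming from the VF is rewritten here to the
 * real port the slave is mapped to, for both the primary and the
 * alternate path.
 */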
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox)
{
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
	u8 pri_sched_queue;
	int port = mlx4_slave_convert_port(
		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

	if (port < 0)
		return -EINVAL;

	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
			  ((port & 1) << 6);

	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
		qpc->pri_path.sched_queue = pri_sched_queue;
	}

	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
		port = mlx4_slave_convert_port(
				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
				+ 1) - 1;
		if (port < 0)
			return -EINVAL;
		qpc->alt_path.sched_queue =
			(qpc->alt_path.sched_queue & ~(1 << 6)) |
			(port & 1) << 6;
	}
	return 0;
}
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}
	return 0;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	if (roce_verify_mac(dev, slave, qpc, inbox))
		return -EINVAL;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		goto out;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx = orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index = orig_vlan_index;
		qp->feup = orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (port < 0)
			return port;
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,
						 reg_id);
	}
	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH) {
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
			if (port < 0)
				return port;
			gid[5] = port;
		}
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}
static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
			    u8 *gid, enum mlx4_protocol prot)
{
	int real_port;

	if (prot != MLX4_PROT_ETH)
		return 0;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (real_port < 0)
			return -EINVAL;
		gid[5] = real_port;
	}

	return 0;
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = mlx4_adjust_port(dev, slave, gid, prot);
		if (err)
			goto ex_put;

		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac*/
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}
/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	int err;
	u32 qpn = vhcr->in_modifier & 0xffffff;
	struct res_qp *rqp;
	u64 mac;
	unsigned port;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;
	int smac_index;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	if ((pri_addr_path_mask &
	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
		!(dev->caps.flags2 &
		  MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
			  slave);
		return -EOPNOTSUPP;
	}

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
		return err;
	}

	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);
		if (err) {
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
				 qpn, smac_index);
			goto err_mac;
		}
	}

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err) {
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
		goto err_mac;
	}

err_mac:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
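
/* Size of an attach mailbox: a fixed control segment followed by a chain
 * of variable-size rule headers, each advertising its own length in
 * 32-bit words and terminated by a zero-size header.
 */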
static u32 qp_attach_mbox_size(void *mbox)
{
	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	struct _rule_hw *rule_header;

	rule_header = (struct _rule_hw *)(mbox + size);

	while (rule_header->size) {
		size += rule_header->size * sizeof(u32);
		rule_header = (struct _rule_hw *)(mbox + size);
	}

	return size;
}

static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;
	struct res_fs_rule *rrule;
	u32 mbox_size;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (err <= 0)
		return -EINVAL;
	ctrl->port = err;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
		mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put_qp;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put_qp;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put_qp;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put_qp;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		goto err_detach;
	}

	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
	if (err)
		goto err_detach;

	mbox_size = qp_attach_mbox_size(inbox->buf);
	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
	if (!rrule->mirr_mbox) {
		err = -ENOMEM;
		goto err_put_rule;
	}
	rrule->mirr_mbox_size = mbox_size;
	rrule->mirr_rule_id = 0;
	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);

	/* set different port */
	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
	if (ctrl->port == 1)
		ctrl->port = 2;
	else
		ctrl->port = 1;

	if (mlx4_is_bonded(dev))
		mlx4_do_mirror_rule(dev, rrule);

	atomic_inc(&rqp->ref_count);

err_put_rule:
	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
	goto err_put_qp;

err_detach:
	/* detach rule on error */
	mlx4_cmd(dev, vhcr->out_param, 0, 0,
		 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		 MLX4_CMD_NATIVE);
err_put_qp:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	int err;

	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		return err;
	}

	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	return 0;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;
	u64 mirr_reg_id;
	int qpn;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;

	if (!rrule->mirr_mbox) {
		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
		return -EINVAL;
	}
	mirr_reg_id = rrule->mirr_rule_id;
	kfree(rrule->mirr_mbox);
	qpn = rrule->qpn;

	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	if (mirr_reg_id && mlx4_is_bonded(dev)) {
		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
		if (err) {
			mlx4_err(dev, "Fail to get resource of mirror rule\n");
		} else {
			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
			mlx4_undo_mirror_rule(dev, rrule);
		}
	}
	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
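
/* Mark every resource still owned by the slave as busy so that no new
 * wrapper can grab it while it is being destroyed; returns the number of
 * resources that were already busy and could not be claimed this pass.
 */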
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct res_fs_rule *mirr_rule;
	u64 reg_id;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!fs_rule->mirr_mbox) {
		mlx4_err(dev, "rule mirroring mailbox is null\n");
		mlx4_free_cmd_mailbox(dev, mailbox);
		return -EINVAL;
	}
	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (err)
		goto err;

	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
	if (err)
		goto err_detach;

	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
	if (err)
		goto err_rem;

	fs_rule->mirr_rule_id = reg_id;
	mirr_rule->mirr_rule_id = 0;
	mirr_rule->mirr_mbox_size = 0;
	mirr_rule->mirr_mbox = NULL;
	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);

	return 0;
err_rem:
	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
	return err;
}
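
/* When the two ports are bonded (HA mode), every native DMFS rule gets a
 * mirror on the other port.  mirr_mbox_size distinguishes native rules
 * (non-zero, mailbox saved at attach time) from mirrors (zero), so the
 * same walk can create mirrors on bond and tear them down on unbond.
 */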
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
	struct rb_node *p;
	struct res_fs_rule *fs_rule;
	int err = 0;
	LIST_HEAD(mirr_list);

	for (p = rb_first(root); p; p = rb_next(p)) {
		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
		if ((bond && fs_rule->mirr_mbox_size) ||
		    (!bond && !fs_rule->mirr_mbox_size))
			list_add_tail(&fs_rule->mirr_list, &mirr_list);
	}

	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
		if (bond)
			err += mlx4_do_mirror_rule(dev, fs_rule);
		else
			err += mlx4_undo_mirror_rule(dev, fs_rule);
	}
	return err;
}
int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, true);
}

int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, false);
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule->mirr_mbox);
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}
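/* Free every XRC domain still owned by @slave. */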
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
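/* Called when a slave (VF) goes down or is reset: release, under the slave's
 * tracker mutex, every resource type the slave still holds.
 */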
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
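/* Request that UPDATE_QP also set the QoS vport for this VF's QP. */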
static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
			   struct mlx4_vf_immed_vlan_work *work)
{
	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
	ctx->qp_context.qos_vport = work->qos_vport;
}
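/* Work handler that pushes an immediate VST/VGT VLAN (and QoS) change to all
 * of a VF's Ethernet QPs via UPDATE_QP, then unregisters the previous VLAN
 * if no update failed.
 */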
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);

				if (dev->caps.flags2 &
				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
					update_qos_vpp(upd_context, work);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}