/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 *     All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
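/*
 * Note on the tracking model above: every tracked object embeds a
 * res_common, and the tracker keeps each resource in two structures at
 * once -- an rb-tree per resource type (res_tree[type], keyed by
 * res_id) for fast lookup by id, and a per-slave list
 * (slave_list[slave].res_list[type]) so that everything owned by one
 * function can be walked and reclaimed when that function resets or
 * dies.  "owner" records the allocating slave, while "state",
 * "from_state" and "to_state" drive the busy-transition protocol used
 * throughout this file.
 */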
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
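/*
 * Worked example of the grant logic above (numbers illustrative only):
 * with quota[slave] = 10, guaranteed = 4, allocated = 3 and count = 3,
 * the request goes past the guaranteed share, so from_free =
 * 3 - (4 - 3) = 2 entries must come out of the shared free pool; the
 * request is granted only while free - from_free stays above the
 * reserved watermark that backs the other functions' guarantees.
 */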
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
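/*
 * Quota shape produced above, with illustrative numbers: for
 * num_instances = 100 and one VF (num_vfs = 1), each function gets
 * guaranteed = 100 / (2 * 2) = 25 and quota = 50 + 25 = 75, so half of
 * the pool is always split into hard guarantees while any single
 * function may burst up to 75 entries if the free pool allows it.
 */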
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < MLX4_MAX_PORTS; j++)
					res_alloc->res_port_rsvd[j] +=
						res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}
}
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type ||
		    (MLX4_QP_ST_UD == qp_type &&
		     !mlx4_is_qp_reserved(dev, qpn)))
			return -EINVAL;

		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
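/*
 * get_res()/put_res() implement a simple busy-bit protocol on top of
 * res_common: get_res() verifies ownership, saves the current state in
 * from_state and parks the resource in RES_ANY_BUSY so no other flow
 * can grab or move it, and put_res() restores from_state.  Typical use
 * in the command wrappers below:
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... use mpt safely ...
 *	put_res(dev, slave, id, RES_MPT);
 */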
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= base; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
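/*
 * QP life cycle enforced above: RES_QP_RESERVED (number allocated)
 * <-> RES_QP_MAPPED (ICM mapped) <-> RES_QP_HW (owned by hardware).
 * Every legal move first parks the resource in the transient
 * RES_QP_BUSY state set here and is then either committed by
 * res_end_move() or rolled back by res_abort_move().  The MPT, EQ, CQ
 * and SRQ variants below follow the same pattern with their own state
 * sets.
 */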
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}
static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
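/*
 * Two different notions of "reserved" QPs meet here: fw_reserved()
 * covers the low QPN range owned by firmware (for which no ICM mapping
 * is needed), while valid_reserved() covers the special/proxy/tunnel
 * QPNs that a slave may legitimately map even though it never reserved
 * them through RES_OP_RESERVE.
 */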
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
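/*
 * qp_alloc_res() above shows the rollback pattern that all the
 * *_alloc_res() helpers below follow: (1) charge the quota with
 * mlx4_grant_resource(), (2) allocate the real resource, (3) register
 * it in the tracker with add_res_range(); each later failure unwinds
 * every earlier step so the quota books stay balanced.
 */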
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			mlx4_release_resource(dev, slave, RES_MAC, 1, port);
			kfree(res);
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;

	return roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				  page_shift);
}
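/*
 * Example of the computation above (numbers illustrative only):
 * log_sq_size = 6 with a stride of 2 (64-byte WQEs) gives
 * sq_size = 1 << (6 + 2 + 4) = 4096 bytes; for an SRQ-attached QP
 * rq_size is 0, so with 4K pages (page_shift = 12) the QP needs
 * roundup_pow_of_two(4096 >> 12) = 1 MTT entry.  page_offset only
 * adds the in-page displacement (in 64-byte units) before the shift.
 */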
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
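/*
 * EQs are the one resource whose tracker id is not the raw hardware
 * index: each slave numbers its EQs from 0, so the wrappers below
 * compose res_id = (slave << 8) | eqn to keep per-slave EQ numbers
 * unique inside the shared RES_EQ rb-tree.
 */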
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx	= inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianess */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
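/*
 * Generate an asynchronous event on a slave's event EQ on behalf of the
 * master.  Only 28 of the 32 EQE bytes are copied into the mailbox;
 * presumably the last dword, which carries the ownership bit, is filled
 * in by the hardware.
 */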
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
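/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize: the CQ moves from its
 * current MTT range to a new one taken from the inbox context.  The
 * reference counts below pin the new range and release the old one only
 * after the firmware command succeeds.
 */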
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
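/*
 * SRQ MTT footprint: 2^log_srq_size WQEs of 2^(log_rq_stride + 4) bytes
 * each, split into pages of 2^page_shift bytes.  Worked example
 * (illustrative): log_srq_size = 10, log_rq_stride = 2 (64-byte WQEs)
 * and 4 KB pages give 1 << (10 + 2 + 4 - 12) = 16 MTT entries.
 */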
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
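/*
 * Generic QP command wrapper: verify that the QP belongs to this slave
 * and is in hardware ownership, then pass the command through to the
 * firmware.  Most of the QP state-transition wrappers below do their
 * paravirtual fixups (pkey index, gid, proxy/tunnel qkey) and then
 * funnel through here.
 */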
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
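/*
 * INIT2RTR is the transition on which VST vlan/QoS enforcement is
 * applied (update_vport_qp_param), so the vlan-related fields the VF
 * originally asked for are saved in the tracker entry to allow a later
 * restore when the VF is switched back to VGT.
 */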
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3	= orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx	= orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index	= orig_vlan_index;
		qp->feup	= orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
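/*
 * Multicast attachments are tracked per QP in rqp->mcg_list so they can
 * be validated on detach and force-cleaned in detach_qp() when a slave
 * goes away.
 */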
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
						 block_loopback, prot,
						 reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     enum mlx4_protocol prot, enum mlx4_steer_type type,
		     u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}
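/*
 * qp_attach/qp_detach above dispatch on the steering mode: B0 steering
 * attaches the QP to the multicast group directly, while device-managed
 * flow steering (DMFS) translates the request into a flow rule and
 * identifies it later by reg_id.
 */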
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}
/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
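/*
 * Attach a device-managed flow-steering rule on behalf of a VF.  The
 * rule is rejected unless its L2 header carries a MAC that belongs to
 * the VF; L3/L4-only rules get an L2 header prepended (add_eth_header)
 * so that a VF can never steer traffic it does not own.
 */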
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
					      struct mlx4_vhcr *vhcr,
					      struct mlx4_cmd_mailbox *inbox,
					      struct mlx4_cmd_mailbox *outbox,
					      struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
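/*
 * The rem_slave_* functions below implement slave cleanup.  Each one
 * first forces the slave's resources of the given type into the busy
 * state (move_all_busy retries for up to 5 seconds), then walks every
 * resource backwards through its state machine, issuing the matching
 * firmware command at each step.  For QPs the path is RES_QP_HW to
 * RES_QP_MAPPED to RES_QP_RESERVED to freed.
 */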
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;
				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;
				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;
				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;
				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;
				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
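/*
 * Worker for immediate VST vlan/QoS changes: rewrite the primary path
 * of every eligible QP owned by the slave on the given port with
 * UPDATE_QP.  Setting vlan_id to MLX4_VGT restores the values the VF
 * originally programmed (saved at INIT2RTR time); any other vlan_id
 * forces the new vlan index, QoS and vlan-control policy.
 */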
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||	/* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}