2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
49 #include "mlx4_stats.h"
51 #define MLX4_MAC_VALID (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
56 struct list_head list
;
64 struct list_head list
;
72 struct list_head list
;
87 struct list_head list
;
89 enum mlx4_protocol prot
;
90 enum mlx4_steer_type steer
;
95 RES_QP_BUSY
= RES_ANY_BUSY
,
97 /* QP number was allocated */
100 /* ICM memory for QP context was mapped */
103 /* QP is in hw ownership */
108 struct res_common com
;
113 struct list_head mcg_list
;
118 /* saved qp params before VST enforcement in order to restore on VGT */
128 enum res_mtt_states
{
129 RES_MTT_BUSY
= RES_ANY_BUSY
,
133 static inline const char *mtt_states_str(enum res_mtt_states state
)
136 case RES_MTT_BUSY
: return "RES_MTT_BUSY";
137 case RES_MTT_ALLOCATED
: return "RES_MTT_ALLOCATED";
138 default: return "Unknown";
143 struct res_common com
;
148 enum res_mpt_states
{
149 RES_MPT_BUSY
= RES_ANY_BUSY
,
156 struct res_common com
;
162 RES_EQ_BUSY
= RES_ANY_BUSY
,
168 struct res_common com
;
173 RES_CQ_BUSY
= RES_ANY_BUSY
,
179 struct res_common com
;
184 enum res_srq_states
{
185 RES_SRQ_BUSY
= RES_ANY_BUSY
,
191 struct res_common com
;
197 enum res_counter_states
{
198 RES_COUNTER_BUSY
= RES_ANY_BUSY
,
199 RES_COUNTER_ALLOCATED
,
203 struct res_common com
;
207 enum res_xrcdn_states
{
208 RES_XRCD_BUSY
= RES_ANY_BUSY
,
213 struct res_common com
;
217 enum res_fs_rule_states
{
218 RES_FS_RULE_BUSY
= RES_ANY_BUSY
,
219 RES_FS_RULE_ALLOCATED
,
223 struct res_common com
;
227 static void *res_tracker_lookup(struct rb_root
*root
, u64 res_id
)
229 struct rb_node
*node
= root
->rb_node
;
232 struct res_common
*res
= container_of(node
, struct res_common
,
235 if (res_id
< res
->res_id
)
236 node
= node
->rb_left
;
237 else if (res_id
> res
->res_id
)
238 node
= node
->rb_right
;
245 static int res_tracker_insert(struct rb_root
*root
, struct res_common
*res
)
247 struct rb_node
**new = &(root
->rb_node
), *parent
= NULL
;
249 /* Figure out where to put new node */
251 struct res_common
*this = container_of(*new, struct res_common
,
255 if (res
->res_id
< this->res_id
)
256 new = &((*new)->rb_left
);
257 else if (res
->res_id
> this->res_id
)
258 new = &((*new)->rb_right
);
263 /* Add new node and rebalance tree. */
264 rb_link_node(&res
->node
, parent
, new);
265 rb_insert_color(&res
->node
, root
);
280 static const char *resource_str(enum mlx4_resource rt
)
283 case RES_QP
: return "RES_QP";
284 case RES_CQ
: return "RES_CQ";
285 case RES_SRQ
: return "RES_SRQ";
286 case RES_MPT
: return "RES_MPT";
287 case RES_MTT
: return "RES_MTT";
288 case RES_MAC
: return "RES_MAC";
289 case RES_VLAN
: return "RES_VLAN";
290 case RES_EQ
: return "RES_EQ";
291 case RES_COUNTER
: return "RES_COUNTER";
292 case RES_FS_RULE
: return "RES_FS_RULE";
293 case RES_XRCD
: return "RES_XRCD";
294 default: return "Unknown resource type !!!";
298 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
);
299 static inline int mlx4_grant_resource(struct mlx4_dev
*dev
, int slave
,
300 enum mlx4_resource res_type
, int count
,
303 struct mlx4_priv
*priv
= mlx4_priv(dev
);
304 struct resource_allocator
*res_alloc
=
305 &priv
->mfunc
.master
.res_tracker
.res_alloc
[res_type
];
307 int allocated
, free
, reserved
, guaranteed
, from_free
;
310 if (slave
> dev
->persist
->num_vfs
)
313 spin_lock(&res_alloc
->alloc_lock
);
314 allocated
= (port
> 0) ?
315 res_alloc
->allocated
[(port
- 1) *
316 (dev
->persist
->num_vfs
+ 1) + slave
] :
317 res_alloc
->allocated
[slave
];
318 free
= (port
> 0) ? res_alloc
->res_port_free
[port
- 1] :
320 reserved
= (port
> 0) ? res_alloc
->res_port_rsvd
[port
- 1] :
321 res_alloc
->res_reserved
;
322 guaranteed
= res_alloc
->guaranteed
[slave
];
324 if (allocated
+ count
> res_alloc
->quota
[slave
]) {
325 mlx4_warn(dev
, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
326 slave
, port
, resource_str(res_type
), count
,
327 allocated
, res_alloc
->quota
[slave
]);
331 if (allocated
+ count
<= guaranteed
) {
335 /* portion may need to be obtained from free area */
336 if (guaranteed
- allocated
> 0)
337 from_free
= count
- (guaranteed
- allocated
);
341 from_rsvd
= count
- from_free
;
343 if (free
- from_free
>= reserved
)
346 mlx4_warn(dev
, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
347 slave
, port
, resource_str(res_type
), free
,
348 from_free
, reserved
);
352 /* grant the request */
354 res_alloc
->allocated
[(port
- 1) *
355 (dev
->persist
->num_vfs
+ 1) + slave
] += count
;
356 res_alloc
->res_port_free
[port
- 1] -= count
;
357 res_alloc
->res_port_rsvd
[port
- 1] -= from_rsvd
;
359 res_alloc
->allocated
[slave
] += count
;
360 res_alloc
->res_free
-= count
;
361 res_alloc
->res_reserved
-= from_rsvd
;
366 spin_unlock(&res_alloc
->alloc_lock
);
370 static inline void mlx4_release_resource(struct mlx4_dev
*dev
, int slave
,
371 enum mlx4_resource res_type
, int count
,
374 struct mlx4_priv
*priv
= mlx4_priv(dev
);
375 struct resource_allocator
*res_alloc
=
376 &priv
->mfunc
.master
.res_tracker
.res_alloc
[res_type
];
377 int allocated
, guaranteed
, from_rsvd
;
379 if (slave
> dev
->persist
->num_vfs
)
382 spin_lock(&res_alloc
->alloc_lock
);
384 allocated
= (port
> 0) ?
385 res_alloc
->allocated
[(port
- 1) *
386 (dev
->persist
->num_vfs
+ 1) + slave
] :
387 res_alloc
->allocated
[slave
];
388 guaranteed
= res_alloc
->guaranteed
[slave
];
390 if (allocated
- count
>= guaranteed
) {
393 /* portion may need to be returned to reserved area */
394 if (allocated
- guaranteed
> 0)
395 from_rsvd
= count
- (allocated
- guaranteed
);
401 res_alloc
->allocated
[(port
- 1) *
402 (dev
->persist
->num_vfs
+ 1) + slave
] -= count
;
403 res_alloc
->res_port_free
[port
- 1] += count
;
404 res_alloc
->res_port_rsvd
[port
- 1] += from_rsvd
;
406 res_alloc
->allocated
[slave
] -= count
;
407 res_alloc
->res_free
+= count
;
408 res_alloc
->res_reserved
+= from_rsvd
;
411 spin_unlock(&res_alloc
->alloc_lock
);
415 static inline void initialize_res_quotas(struct mlx4_dev
*dev
,
416 struct resource_allocator
*res_alloc
,
417 enum mlx4_resource res_type
,
418 int vf
, int num_instances
)
420 res_alloc
->guaranteed
[vf
] = num_instances
/
421 (2 * (dev
->persist
->num_vfs
+ 1));
422 res_alloc
->quota
[vf
] = (num_instances
/ 2) + res_alloc
->guaranteed
[vf
];
423 if (vf
== mlx4_master_func_num(dev
)) {
424 res_alloc
->res_free
= num_instances
;
425 if (res_type
== RES_MTT
) {
426 /* reserved mtts will be taken out of the PF allocation */
427 res_alloc
->res_free
+= dev
->caps
.reserved_mtts
;
428 res_alloc
->guaranteed
[vf
] += dev
->caps
.reserved_mtts
;
429 res_alloc
->quota
[vf
] += dev
->caps
.reserved_mtts
;
434 void mlx4_init_quotas(struct mlx4_dev
*dev
)
436 struct mlx4_priv
*priv
= mlx4_priv(dev
);
439 /* quotas for VFs are initialized in mlx4_slave_cap */
440 if (mlx4_is_slave(dev
))
443 if (!mlx4_is_mfunc(dev
)) {
444 dev
->quotas
.qp
= dev
->caps
.num_qps
- dev
->caps
.reserved_qps
-
445 mlx4_num_reserved_sqps(dev
);
446 dev
->quotas
.cq
= dev
->caps
.num_cqs
- dev
->caps
.reserved_cqs
;
447 dev
->quotas
.srq
= dev
->caps
.num_srqs
- dev
->caps
.reserved_srqs
;
448 dev
->quotas
.mtt
= dev
->caps
.num_mtts
- dev
->caps
.reserved_mtts
;
449 dev
->quotas
.mpt
= dev
->caps
.num_mpts
- dev
->caps
.reserved_mrws
;
453 pf
= mlx4_master_func_num(dev
);
455 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_QP
].quota
[pf
];
457 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_CQ
].quota
[pf
];
459 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_SRQ
].quota
[pf
];
461 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_MTT
].quota
[pf
];
463 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_MPT
].quota
[pf
];
466 static int get_max_gauranteed_vfs_counter(struct mlx4_dev
*dev
)
468 /* reduce the sink counter */
469 return (dev
->caps
.max_counters
- 1 -
470 (MLX4_PF_COUNTERS_PER_PORT
* MLX4_MAX_PORTS
))
474 int mlx4_init_resource_tracker(struct mlx4_dev
*dev
)
476 struct mlx4_priv
*priv
= mlx4_priv(dev
);
479 int max_vfs_guarantee_counter
= get_max_gauranteed_vfs_counter(dev
);
481 priv
->mfunc
.master
.res_tracker
.slave_list
=
482 kzalloc(dev
->num_slaves
* sizeof(struct slave_list
),
484 if (!priv
->mfunc
.master
.res_tracker
.slave_list
)
487 for (i
= 0 ; i
< dev
->num_slaves
; i
++) {
488 for (t
= 0; t
< MLX4_NUM_OF_RESOURCE_TYPE
; ++t
)
489 INIT_LIST_HEAD(&priv
->mfunc
.master
.res_tracker
.
490 slave_list
[i
].res_list
[t
]);
491 mutex_init(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
494 mlx4_dbg(dev
, "Started init_resource_tracker: %ld slaves\n",
496 for (i
= 0 ; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++)
497 priv
->mfunc
.master
.res_tracker
.res_tree
[i
] = RB_ROOT
;
499 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
500 struct resource_allocator
*res_alloc
=
501 &priv
->mfunc
.master
.res_tracker
.res_alloc
[i
];
502 res_alloc
->quota
= kmalloc((dev
->persist
->num_vfs
+ 1) *
503 sizeof(int), GFP_KERNEL
);
504 res_alloc
->guaranteed
= kmalloc((dev
->persist
->num_vfs
+ 1) *
505 sizeof(int), GFP_KERNEL
);
506 if (i
== RES_MAC
|| i
== RES_VLAN
)
507 res_alloc
->allocated
= kzalloc(MLX4_MAX_PORTS
*
508 (dev
->persist
->num_vfs
510 sizeof(int), GFP_KERNEL
);
512 res_alloc
->allocated
= kzalloc((dev
->persist
->
514 sizeof(int), GFP_KERNEL
);
515 /* Reduce the sink counter */
516 if (i
== RES_COUNTER
)
517 res_alloc
->res_free
= dev
->caps
.max_counters
- 1;
519 if (!res_alloc
->quota
|| !res_alloc
->guaranteed
||
520 !res_alloc
->allocated
)
523 spin_lock_init(&res_alloc
->alloc_lock
);
524 for (t
= 0; t
< dev
->persist
->num_vfs
+ 1; t
++) {
525 struct mlx4_active_ports actv_ports
=
526 mlx4_get_active_ports(dev
, t
);
529 initialize_res_quotas(dev
, res_alloc
, RES_QP
,
530 t
, dev
->caps
.num_qps
-
531 dev
->caps
.reserved_qps
-
532 mlx4_num_reserved_sqps(dev
));
535 initialize_res_quotas(dev
, res_alloc
, RES_CQ
,
536 t
, dev
->caps
.num_cqs
-
537 dev
->caps
.reserved_cqs
);
540 initialize_res_quotas(dev
, res_alloc
, RES_SRQ
,
541 t
, dev
->caps
.num_srqs
-
542 dev
->caps
.reserved_srqs
);
545 initialize_res_quotas(dev
, res_alloc
, RES_MPT
,
546 t
, dev
->caps
.num_mpts
-
547 dev
->caps
.reserved_mrws
);
550 initialize_res_quotas(dev
, res_alloc
, RES_MTT
,
551 t
, dev
->caps
.num_mtts
-
552 dev
->caps
.reserved_mtts
);
555 if (t
== mlx4_master_func_num(dev
)) {
556 int max_vfs_pport
= 0;
557 /* Calculate the max vfs per port for */
559 for (j
= 0; j
< dev
->caps
.num_ports
;
561 struct mlx4_slaves_pport slaves_pport
=
562 mlx4_phys_to_slaves_pport(dev
, j
+ 1);
563 unsigned current_slaves
=
564 bitmap_weight(slaves_pport
.slaves
,
565 dev
->caps
.num_ports
) - 1;
566 if (max_vfs_pport
< current_slaves
)
570 res_alloc
->quota
[t
] =
573 res_alloc
->guaranteed
[t
] = 2;
574 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
575 res_alloc
->res_port_free
[j
] =
578 res_alloc
->quota
[t
] = MLX4_MAX_MAC_NUM
;
579 res_alloc
->guaranteed
[t
] = 2;
583 if (t
== mlx4_master_func_num(dev
)) {
584 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
;
585 res_alloc
->guaranteed
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
586 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
587 res_alloc
->res_port_free
[j
] =
590 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
591 res_alloc
->guaranteed
[t
] = 0;
595 res_alloc
->quota
[t
] = dev
->caps
.max_counters
;
596 if (t
== mlx4_master_func_num(dev
))
597 res_alloc
->guaranteed
[t
] =
598 MLX4_PF_COUNTERS_PER_PORT
*
600 else if (t
<= max_vfs_guarantee_counter
)
601 res_alloc
->guaranteed
[t
] =
602 MLX4_VF_COUNTERS_PER_PORT
*
605 res_alloc
->guaranteed
[t
] = 0;
606 res_alloc
->res_free
-= res_alloc
->guaranteed
[t
];
611 if (i
== RES_MAC
|| i
== RES_VLAN
) {
612 for (j
= 0; j
< dev
->caps
.num_ports
; j
++)
613 if (test_bit(j
, actv_ports
.ports
))
614 res_alloc
->res_port_rsvd
[j
] +=
615 res_alloc
->guaranteed
[t
];
617 res_alloc
->res_reserved
+= res_alloc
->guaranteed
[t
];
621 spin_lock_init(&priv
->mfunc
.master
.res_tracker
.lock
);
625 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
626 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
627 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
628 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
629 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
630 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
631 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
636 void mlx4_free_resource_tracker(struct mlx4_dev
*dev
,
637 enum mlx4_res_tracker_free_type type
)
639 struct mlx4_priv
*priv
= mlx4_priv(dev
);
642 if (priv
->mfunc
.master
.res_tracker
.slave_list
) {
643 if (type
!= RES_TR_FREE_STRUCTS_ONLY
) {
644 for (i
= 0; i
< dev
->num_slaves
; i
++) {
645 if (type
== RES_TR_FREE_ALL
||
646 dev
->caps
.function
!= i
)
647 mlx4_delete_all_resources_for_slave(dev
, i
);
649 /* free master's vlans */
650 i
= dev
->caps
.function
;
651 mlx4_reset_roce_gids(dev
, i
);
652 mutex_lock(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
653 rem_slave_vlans(dev
, i
);
654 mutex_unlock(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
657 if (type
!= RES_TR_FREE_SLAVES_ONLY
) {
658 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
659 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
660 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
661 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
662 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
663 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
664 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
666 kfree(priv
->mfunc
.master
.res_tracker
.slave_list
);
667 priv
->mfunc
.master
.res_tracker
.slave_list
= NULL
;
672 static void update_pkey_index(struct mlx4_dev
*dev
, int slave
,
673 struct mlx4_cmd_mailbox
*inbox
)
675 u8 sched
= *(u8
*)(inbox
->buf
+ 64);
676 u8 orig_index
= *(u8
*)(inbox
->buf
+ 35);
678 struct mlx4_priv
*priv
= mlx4_priv(dev
);
681 port
= (sched
>> 6 & 1) + 1;
683 new_index
= priv
->virt2phys_pkey
[slave
][port
- 1][orig_index
];
684 *(u8
*)(inbox
->buf
+ 35) = new_index
;
687 static void update_gid(struct mlx4_dev
*dev
, struct mlx4_cmd_mailbox
*inbox
,
690 struct mlx4_qp_context
*qp_ctx
= inbox
->buf
+ 8;
691 enum mlx4_qp_optpar optpar
= be32_to_cpu(*(__be32
*) inbox
->buf
);
692 u32 ts
= (be32_to_cpu(qp_ctx
->flags
) >> 16) & 0xff;
695 if (MLX4_QP_ST_UD
== ts
) {
696 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
697 if (mlx4_is_eth(dev
, port
))
698 qp_ctx
->pri_path
.mgid_index
=
699 mlx4_get_base_gid_ix(dev
, slave
, port
) | 0x80;
701 qp_ctx
->pri_path
.mgid_index
= slave
| 0x80;
703 } else if (MLX4_QP_ST_RC
== ts
|| MLX4_QP_ST_XRC
== ts
|| MLX4_QP_ST_UC
== ts
) {
704 if (optpar
& MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
) {
705 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
706 if (mlx4_is_eth(dev
, port
)) {
707 qp_ctx
->pri_path
.mgid_index
+=
708 mlx4_get_base_gid_ix(dev
, slave
, port
);
709 qp_ctx
->pri_path
.mgid_index
&= 0x7f;
711 qp_ctx
->pri_path
.mgid_index
= slave
& 0x7F;
714 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
715 port
= (qp_ctx
->alt_path
.sched_queue
>> 6 & 1) + 1;
716 if (mlx4_is_eth(dev
, port
)) {
717 qp_ctx
->alt_path
.mgid_index
+=
718 mlx4_get_base_gid_ix(dev
, slave
, port
);
719 qp_ctx
->alt_path
.mgid_index
&= 0x7f;
721 qp_ctx
->alt_path
.mgid_index
= slave
& 0x7F;
727 static int handle_counter(struct mlx4_dev
*dev
, struct mlx4_qp_context
*qpc
,
730 static int update_vport_qp_param(struct mlx4_dev
*dev
,
731 struct mlx4_cmd_mailbox
*inbox
,
734 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
735 struct mlx4_vport_oper_state
*vp_oper
;
736 struct mlx4_priv
*priv
;
740 port
= (qpc
->pri_path
.sched_queue
& 0x40) ? 2 : 1;
741 priv
= mlx4_priv(dev
);
742 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
743 qp_type
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
745 err
= handle_counter(dev
, qpc
, slave
, port
);
749 if (MLX4_VGT
!= vp_oper
->state
.default_vlan
) {
750 /* the reserved QPs (special, proxy, tunnel)
751 * do not operate over vlans
753 if (mlx4_is_qp_reserved(dev
, qpn
))
756 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
757 if (qp_type
== MLX4_QP_ST_UD
||
758 (qp_type
== MLX4_QP_ST_MLX
&& mlx4_is_eth(dev
, port
))) {
759 if (dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_VSD_INIT2RTR
) {
760 *(__be32
*)inbox
->buf
=
761 cpu_to_be32(be32_to_cpu(*(__be32
*)inbox
->buf
) |
762 MLX4_QP_OPTPAR_VLAN_STRIPPING
);
763 qpc
->param3
&= ~cpu_to_be32(MLX4_STRIP_VLAN
);
765 struct mlx4_update_qp_params params
= {.flags
= 0};
767 err
= mlx4_update_qp(dev
, qpn
, MLX4_UPDATE_QP_VSD
, ¶ms
);
773 if (vp_oper
->state
.link_state
== IFLA_VF_LINK_STATE_DISABLE
&&
774 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP
) {
775 qpc
->pri_path
.vlan_control
=
776 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
777 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
778 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED
|
779 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
780 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
|
781 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
782 } else if (0 != vp_oper
->state
.default_vlan
) {
783 qpc
->pri_path
.vlan_control
=
784 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
785 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
786 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
787 } else { /* priority tagged */
788 qpc
->pri_path
.vlan_control
=
789 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
790 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
793 qpc
->pri_path
.fvl_rx
|= MLX4_FVL_RX_FORCE_ETH_VLAN
;
794 qpc
->pri_path
.vlan_index
= vp_oper
->vlan_idx
;
795 qpc
->pri_path
.fl
|= MLX4_FL_CV
| MLX4_FL_ETH_HIDE_CQE_VLAN
;
796 qpc
->pri_path
.feup
|= MLX4_FEUP_FORCE_ETH_UP
| MLX4_FVL_FORCE_ETH_VLAN
;
797 qpc
->pri_path
.sched_queue
&= 0xC7;
798 qpc
->pri_path
.sched_queue
|= (vp_oper
->state
.default_qos
) << 3;
799 qpc
->qos_vport
= vp_oper
->state
.qos_vport
;
801 if (vp_oper
->state
.spoofchk
) {
802 qpc
->pri_path
.feup
|= MLX4_FSM_FORCE_ETH_SRC_MAC
;
803 qpc
->pri_path
.grh_mylmc
= (0x80 & qpc
->pri_path
.grh_mylmc
) + vp_oper
->mac_idx
;
809 static int mpt_mask(struct mlx4_dev
*dev
)
811 return dev
->caps
.num_mpts
- 1;
814 static void *find_res(struct mlx4_dev
*dev
, u64 res_id
,
815 enum mlx4_resource type
)
817 struct mlx4_priv
*priv
= mlx4_priv(dev
);
819 return res_tracker_lookup(&priv
->mfunc
.master
.res_tracker
.res_tree
[type
],
823 static int get_res(struct mlx4_dev
*dev
, int slave
, u64 res_id
,
824 enum mlx4_resource type
,
827 struct res_common
*r
;
830 spin_lock_irq(mlx4_tlock(dev
));
831 r
= find_res(dev
, res_id
, type
);
837 if (r
->state
== RES_ANY_BUSY
) {
842 if (r
->owner
!= slave
) {
847 r
->from_state
= r
->state
;
848 r
->state
= RES_ANY_BUSY
;
851 *((struct res_common
**)res
) = r
;
854 spin_unlock_irq(mlx4_tlock(dev
));
858 int mlx4_get_slave_from_resource_id(struct mlx4_dev
*dev
,
859 enum mlx4_resource type
,
860 u64 res_id
, int *slave
)
863 struct res_common
*r
;
869 spin_lock(mlx4_tlock(dev
));
871 r
= find_res(dev
, id
, type
);
876 spin_unlock(mlx4_tlock(dev
));
881 static void put_res(struct mlx4_dev
*dev
, int slave
, u64 res_id
,
882 enum mlx4_resource type
)
884 struct res_common
*r
;
886 spin_lock_irq(mlx4_tlock(dev
));
887 r
= find_res(dev
, res_id
, type
);
889 r
->state
= r
->from_state
;
890 spin_unlock_irq(mlx4_tlock(dev
));
893 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
894 u64 in_param
, u64
*out_param
, int port
);
896 static int handle_existing_counter(struct mlx4_dev
*dev
, u8 slave
, int port
,
899 struct res_common
*r
;
900 struct res_counter
*counter
;
903 if (counter_index
== MLX4_SINK_COUNTER_INDEX(dev
))
906 spin_lock_irq(mlx4_tlock(dev
));
907 r
= find_res(dev
, counter_index
, RES_COUNTER
);
908 if (!r
|| r
->owner
!= slave
)
910 counter
= container_of(r
, struct res_counter
, com
);
912 counter
->port
= port
;
914 spin_unlock_irq(mlx4_tlock(dev
));
918 static int handle_unexisting_counter(struct mlx4_dev
*dev
,
919 struct mlx4_qp_context
*qpc
, u8 slave
,
922 struct mlx4_priv
*priv
= mlx4_priv(dev
);
923 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
924 struct res_common
*tmp
;
925 struct res_counter
*counter
;
926 u64 counter_idx
= MLX4_SINK_COUNTER_INDEX(dev
);
929 spin_lock_irq(mlx4_tlock(dev
));
930 list_for_each_entry(tmp
,
931 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
],
933 counter
= container_of(tmp
, struct res_counter
, com
);
934 if (port
== counter
->port
) {
935 qpc
->pri_path
.counter_index
= counter
->com
.res_id
;
936 spin_unlock_irq(mlx4_tlock(dev
));
940 spin_unlock_irq(mlx4_tlock(dev
));
942 /* No existing counter, need to allocate a new counter */
943 err
= counter_alloc_res(dev
, slave
, RES_OP_RESERVE
, 0, 0, &counter_idx
,
945 if (err
== -ENOENT
) {
947 } else if (err
&& err
!= -ENOSPC
) {
948 mlx4_err(dev
, "%s: failed to create new counter for slave %d err %d\n",
949 __func__
, slave
, err
);
951 qpc
->pri_path
.counter_index
= counter_idx
;
952 mlx4_dbg(dev
, "%s: alloc new counter for slave %d index %d\n",
953 __func__
, slave
, qpc
->pri_path
.counter_index
);
960 static int handle_counter(struct mlx4_dev
*dev
, struct mlx4_qp_context
*qpc
,
963 if (qpc
->pri_path
.counter_index
!= MLX4_SINK_COUNTER_INDEX(dev
))
964 return handle_existing_counter(dev
, slave
, port
,
965 qpc
->pri_path
.counter_index
);
967 return handle_unexisting_counter(dev
, qpc
, slave
, port
);
970 static struct res_common
*alloc_qp_tr(int id
)
974 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
978 ret
->com
.res_id
= id
;
979 ret
->com
.state
= RES_QP_RESERVED
;
981 INIT_LIST_HEAD(&ret
->mcg_list
);
982 spin_lock_init(&ret
->mcg_spl
);
983 atomic_set(&ret
->ref_count
, 0);
988 static struct res_common
*alloc_mtt_tr(int id
, int order
)
992 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
996 ret
->com
.res_id
= id
;
998 ret
->com
.state
= RES_MTT_ALLOCATED
;
999 atomic_set(&ret
->ref_count
, 0);
1004 static struct res_common
*alloc_mpt_tr(int id
, int key
)
1006 struct res_mpt
*ret
;
1008 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1012 ret
->com
.res_id
= id
;
1013 ret
->com
.state
= RES_MPT_RESERVED
;
1019 static struct res_common
*alloc_eq_tr(int id
)
1023 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1027 ret
->com
.res_id
= id
;
1028 ret
->com
.state
= RES_EQ_RESERVED
;
1033 static struct res_common
*alloc_cq_tr(int id
)
1037 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1041 ret
->com
.res_id
= id
;
1042 ret
->com
.state
= RES_CQ_ALLOCATED
;
1043 atomic_set(&ret
->ref_count
, 0);
1048 static struct res_common
*alloc_srq_tr(int id
)
1050 struct res_srq
*ret
;
1052 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1056 ret
->com
.res_id
= id
;
1057 ret
->com
.state
= RES_SRQ_ALLOCATED
;
1058 atomic_set(&ret
->ref_count
, 0);
1063 static struct res_common
*alloc_counter_tr(int id
, int port
)
1065 struct res_counter
*ret
;
1067 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1071 ret
->com
.res_id
= id
;
1072 ret
->com
.state
= RES_COUNTER_ALLOCATED
;
1078 static struct res_common
*alloc_xrcdn_tr(int id
)
1080 struct res_xrcdn
*ret
;
1082 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1086 ret
->com
.res_id
= id
;
1087 ret
->com
.state
= RES_XRCD_ALLOCATED
;
1092 static struct res_common
*alloc_fs_rule_tr(u64 id
, int qpn
)
1094 struct res_fs_rule
*ret
;
1096 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
1100 ret
->com
.res_id
= id
;
1101 ret
->com
.state
= RES_FS_RULE_ALLOCATED
;
1106 static struct res_common
*alloc_tr(u64 id
, enum mlx4_resource type
, int slave
,
1109 struct res_common
*ret
;
1113 ret
= alloc_qp_tr(id
);
1116 ret
= alloc_mpt_tr(id
, extra
);
1119 ret
= alloc_mtt_tr(id
, extra
);
1122 ret
= alloc_eq_tr(id
);
1125 ret
= alloc_cq_tr(id
);
1128 ret
= alloc_srq_tr(id
);
1131 pr_err("implementation missing\n");
1134 ret
= alloc_counter_tr(id
, extra
);
1137 ret
= alloc_xrcdn_tr(id
);
1140 ret
= alloc_fs_rule_tr(id
, extra
);
1151 int mlx4_calc_vf_counters(struct mlx4_dev
*dev
, int slave
, int port
,
1152 struct mlx4_counter
*data
)
1154 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1155 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1156 struct res_common
*tmp
;
1157 struct res_counter
*counter
;
1161 memset(data
, 0, sizeof(*data
));
1163 counters_arr
= kmalloc_array(dev
->caps
.max_counters
,
1164 sizeof(*counters_arr
), GFP_KERNEL
);
1168 spin_lock_irq(mlx4_tlock(dev
));
1169 list_for_each_entry(tmp
,
1170 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
],
1172 counter
= container_of(tmp
, struct res_counter
, com
);
1173 if (counter
->port
== port
) {
1174 counters_arr
[i
] = (int)tmp
->res_id
;
1178 spin_unlock_irq(mlx4_tlock(dev
));
1179 counters_arr
[i
] = -1;
1183 while (counters_arr
[i
] != -1) {
1184 err
= mlx4_get_counter_stats(dev
, counters_arr
[i
], data
,
1187 memset(data
, 0, sizeof(*data
));
1194 kfree(counters_arr
);
1198 static int add_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1199 enum mlx4_resource type
, int extra
)
1203 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1204 struct res_common
**res_arr
;
1205 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1206 struct rb_root
*root
= &tracker
->res_tree
[type
];
1208 res_arr
= kzalloc(count
* sizeof *res_arr
, GFP_KERNEL
);
1212 for (i
= 0; i
< count
; ++i
) {
1213 res_arr
[i
] = alloc_tr(base
+ i
, type
, slave
, extra
);
1215 for (--i
; i
>= 0; --i
)
1223 spin_lock_irq(mlx4_tlock(dev
));
1224 for (i
= 0; i
< count
; ++i
) {
1225 if (find_res(dev
, base
+ i
, type
)) {
1229 err
= res_tracker_insert(root
, res_arr
[i
]);
1232 list_add_tail(&res_arr
[i
]->list
,
1233 &tracker
->slave_list
[slave
].res_list
[type
]);
1235 spin_unlock_irq(mlx4_tlock(dev
));
1241 for (--i
; i
>= 0; --i
) {
1242 rb_erase(&res_arr
[i
]->node
, root
);
1243 list_del_init(&res_arr
[i
]->list
);
1246 spin_unlock_irq(mlx4_tlock(dev
));
1248 for (i
= 0; i
< count
; ++i
)
1256 static int remove_qp_ok(struct res_qp
*res
)
1258 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1259 !list_empty(&res
->mcg_list
)) {
1260 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1261 res
->com
.state
, atomic_read(&res
->ref_count
));
1263 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1270 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1272 if (res
->com
.state
== RES_MTT_BUSY
||
1273 atomic_read(&res
->ref_count
)) {
1274 pr_devel("%s-%d: state %s, ref_count %d\n",
1276 mtt_states_str(res
->com
.state
),
1277 atomic_read(&res
->ref_count
));
1279 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1281 else if (res
->order
!= order
)
1287 static int remove_mpt_ok(struct res_mpt
*res
)
1289 if (res
->com
.state
== RES_MPT_BUSY
)
1291 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1297 static int remove_eq_ok(struct res_eq
*res
)
1299 if (res
->com
.state
== RES_MPT_BUSY
)
1301 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1307 static int remove_counter_ok(struct res_counter
*res
)
1309 if (res
->com
.state
== RES_COUNTER_BUSY
)
1311 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1317 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1319 if (res
->com
.state
== RES_XRCD_BUSY
)
1321 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1327 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1329 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1331 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1337 static int remove_cq_ok(struct res_cq
*res
)
1339 if (res
->com
.state
== RES_CQ_BUSY
)
1341 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1347 static int remove_srq_ok(struct res_srq
*res
)
1349 if (res
->com
.state
== RES_SRQ_BUSY
)
1351 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1357 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1361 return remove_qp_ok((struct res_qp
*)res
);
1363 return remove_cq_ok((struct res_cq
*)res
);
1365 return remove_srq_ok((struct res_srq
*)res
);
1367 return remove_mpt_ok((struct res_mpt
*)res
);
1369 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1373 return remove_eq_ok((struct res_eq
*)res
);
1375 return remove_counter_ok((struct res_counter
*)res
);
1377 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1379 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
1385 static int rem_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1386 enum mlx4_resource type
, int extra
)
1390 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1391 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1392 struct res_common
*r
;
1394 spin_lock_irq(mlx4_tlock(dev
));
1395 for (i
= base
; i
< base
+ count
; ++i
) {
1396 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1401 if (r
->owner
!= slave
) {
1405 err
= remove_ok(r
, type
, extra
);
1410 for (i
= base
; i
< base
+ count
; ++i
) {
1411 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1412 rb_erase(&r
->node
, &tracker
->res_tree
[type
]);
1419 spin_unlock_irq(mlx4_tlock(dev
));
1424 static int qp_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int qpn
,
1425 enum res_qp_states state
, struct res_qp
**qp
,
1428 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1429 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1433 spin_lock_irq(mlx4_tlock(dev
));
1434 r
= res_tracker_lookup(&tracker
->res_tree
[RES_QP
], qpn
);
1437 else if (r
->com
.owner
!= slave
)
1442 mlx4_dbg(dev
, "%s: failed RES_QP, 0x%llx\n",
1443 __func__
, r
->com
.res_id
);
1447 case RES_QP_RESERVED
:
1448 if (r
->com
.state
== RES_QP_MAPPED
&& !alloc
)
1451 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n", r
->com
.res_id
);
1456 if ((r
->com
.state
== RES_QP_RESERVED
&& alloc
) ||
1457 r
->com
.state
== RES_QP_HW
)
1460 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n",
1468 if (r
->com
.state
!= RES_QP_MAPPED
)
1476 r
->com
.from_state
= r
->com
.state
;
1477 r
->com
.to_state
= state
;
1478 r
->com
.state
= RES_QP_BUSY
;
1484 spin_unlock_irq(mlx4_tlock(dev
));
1489 static int mr_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1490 enum res_mpt_states state
, struct res_mpt
**mpt
)
1492 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1493 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1497 spin_lock_irq(mlx4_tlock(dev
));
1498 r
= res_tracker_lookup(&tracker
->res_tree
[RES_MPT
], index
);
1501 else if (r
->com
.owner
!= slave
)
1509 case RES_MPT_RESERVED
:
1510 if (r
->com
.state
!= RES_MPT_MAPPED
)
1514 case RES_MPT_MAPPED
:
1515 if (r
->com
.state
!= RES_MPT_RESERVED
&&
1516 r
->com
.state
!= RES_MPT_HW
)
1521 if (r
->com
.state
!= RES_MPT_MAPPED
)
1529 r
->com
.from_state
= r
->com
.state
;
1530 r
->com
.to_state
= state
;
1531 r
->com
.state
= RES_MPT_BUSY
;
1537 spin_unlock_irq(mlx4_tlock(dev
));
1542 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1543 enum res_eq_states state
, struct res_eq
**eq
)
1545 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1546 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1550 spin_lock_irq(mlx4_tlock(dev
));
1551 r
= res_tracker_lookup(&tracker
->res_tree
[RES_EQ
], index
);
1554 else if (r
->com
.owner
!= slave
)
1562 case RES_EQ_RESERVED
:
1563 if (r
->com
.state
!= RES_EQ_HW
)
1568 if (r
->com
.state
!= RES_EQ_RESERVED
)
1577 r
->com
.from_state
= r
->com
.state
;
1578 r
->com
.to_state
= state
;
1579 r
->com
.state
= RES_EQ_BUSY
;
1585 spin_unlock_irq(mlx4_tlock(dev
));
1590 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
1591 enum res_cq_states state
, struct res_cq
**cq
)
1593 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1594 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1598 spin_lock_irq(mlx4_tlock(dev
));
1599 r
= res_tracker_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
1602 } else if (r
->com
.owner
!= slave
) {
1604 } else if (state
== RES_CQ_ALLOCATED
) {
1605 if (r
->com
.state
!= RES_CQ_HW
)
1607 else if (atomic_read(&r
->ref_count
))
1611 } else if (state
!= RES_CQ_HW
|| r
->com
.state
!= RES_CQ_ALLOCATED
) {
1618 r
->com
.from_state
= r
->com
.state
;
1619 r
->com
.to_state
= state
;
1620 r
->com
.state
= RES_CQ_BUSY
;
1625 spin_unlock_irq(mlx4_tlock(dev
));
1630 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1631 enum res_srq_states state
, struct res_srq
**srq
)
1633 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1634 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1638 spin_lock_irq(mlx4_tlock(dev
));
1639 r
= res_tracker_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
1642 } else if (r
->com
.owner
!= slave
) {
1644 } else if (state
== RES_SRQ_ALLOCATED
) {
1645 if (r
->com
.state
!= RES_SRQ_HW
)
1647 else if (atomic_read(&r
->ref_count
))
1649 } else if (state
!= RES_SRQ_HW
|| r
->com
.state
!= RES_SRQ_ALLOCATED
) {
1654 r
->com
.from_state
= r
->com
.state
;
1655 r
->com
.to_state
= state
;
1656 r
->com
.state
= RES_SRQ_BUSY
;
1661 spin_unlock_irq(mlx4_tlock(dev
));
1666 static void res_abort_move(struct mlx4_dev
*dev
, int slave
,
1667 enum mlx4_resource type
, int id
)
1669 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1670 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1671 struct res_common
*r
;
1673 spin_lock_irq(mlx4_tlock(dev
));
1674 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1675 if (r
&& (r
->owner
== slave
))
1676 r
->state
= r
->from_state
;
1677 spin_unlock_irq(mlx4_tlock(dev
));
1680 static void res_end_move(struct mlx4_dev
*dev
, int slave
,
1681 enum mlx4_resource type
, int id
)
1683 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1684 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1685 struct res_common
*r
;
1687 spin_lock_irq(mlx4_tlock(dev
));
1688 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1689 if (r
&& (r
->owner
== slave
))
1690 r
->state
= r
->to_state
;
1691 spin_unlock_irq(mlx4_tlock(dev
));
1694 static int valid_reserved(struct mlx4_dev
*dev
, int slave
, int qpn
)
1696 return mlx4_is_qp_reserved(dev
, qpn
) &&
1697 (mlx4_is_master(dev
) || mlx4_is_guest_proxy(dev
, slave
, qpn
));
1700 static int fw_reserved(struct mlx4_dev
*dev
, int qpn
)
1702 return qpn
< dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
];
1705 static int qp_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1706 u64 in_param
, u64
*out_param
)
1716 case RES_OP_RESERVE
:
1717 count
= get_param_l(&in_param
) & 0xffffff;
1718 /* Turn off all unsupported QP allocation flags that the
1719 * slave tries to set.
1721 flags
= (get_param_l(&in_param
) >> 24) & dev
->caps
.alloc_res_qp_mask
;
1722 align
= get_param_h(&in_param
);
1723 err
= mlx4_grant_resource(dev
, slave
, RES_QP
, count
, 0);
1727 err
= __mlx4_qp_reserve_range(dev
, count
, align
, &base
, flags
);
1729 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1733 err
= add_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1735 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1736 __mlx4_qp_release_range(dev
, base
, count
);
1739 set_param_l(out_param
, base
);
1741 case RES_OP_MAP_ICM
:
1742 qpn
= get_param_l(&in_param
) & 0x7fffff;
1743 if (valid_reserved(dev
, slave
, qpn
)) {
1744 err
= add_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1749 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
,
1754 if (!fw_reserved(dev
, qpn
)) {
1755 err
= __mlx4_qp_alloc_icm(dev
, qpn
, GFP_KERNEL
);
1757 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1762 res_end_move(dev
, slave
, RES_QP
, qpn
);
1772 static int mtt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1773 u64 in_param
, u64
*out_param
)
1779 if (op
!= RES_OP_RESERVE_AND_MAP
)
1782 order
= get_param_l(&in_param
);
1784 err
= mlx4_grant_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1788 base
= __mlx4_alloc_mtt_range(dev
, order
);
1790 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1794 err
= add_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1796 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1797 __mlx4_free_mtt_range(dev
, base
, order
);
1799 set_param_l(out_param
, base
);
1805 static int mpt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1806 u64 in_param
, u64
*out_param
)
1811 struct res_mpt
*mpt
;
1814 case RES_OP_RESERVE
:
1815 err
= mlx4_grant_resource(dev
, slave
, RES_MPT
, 1, 0);
1819 index
= __mlx4_mpt_reserve(dev
);
1821 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1824 id
= index
& mpt_mask(dev
);
1826 err
= add_res_range(dev
, slave
, id
, 1, RES_MPT
, index
);
1828 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1829 __mlx4_mpt_release(dev
, index
);
1832 set_param_l(out_param
, index
);
1834 case RES_OP_MAP_ICM
:
1835 index
= get_param_l(&in_param
);
1836 id
= index
& mpt_mask(dev
);
1837 err
= mr_res_start_move_to(dev
, slave
, id
,
1838 RES_MPT_MAPPED
, &mpt
);
1842 err
= __mlx4_mpt_alloc_icm(dev
, mpt
->key
, GFP_KERNEL
);
1844 res_abort_move(dev
, slave
, RES_MPT
, id
);
1848 res_end_move(dev
, slave
, RES_MPT
, id
);
1854 static int cq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1855 u64 in_param
, u64
*out_param
)
1861 case RES_OP_RESERVE_AND_MAP
:
1862 err
= mlx4_grant_resource(dev
, slave
, RES_CQ
, 1, 0);
1866 err
= __mlx4_cq_alloc_icm(dev
, &cqn
);
1868 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1872 err
= add_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1874 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1875 __mlx4_cq_free_icm(dev
, cqn
);
1879 set_param_l(out_param
, cqn
);
1889 static int srq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1890 u64 in_param
, u64
*out_param
)
1896 case RES_OP_RESERVE_AND_MAP
:
1897 err
= mlx4_grant_resource(dev
, slave
, RES_SRQ
, 1, 0);
1901 err
= __mlx4_srq_alloc_icm(dev
, &srqn
);
1903 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1907 err
= add_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1909 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1910 __mlx4_srq_free_icm(dev
, srqn
);
1914 set_param_l(out_param
, srqn
);
1924 static int mac_find_smac_ix_in_slave(struct mlx4_dev
*dev
, int slave
, int port
,
1925 u8 smac_index
, u64
*mac
)
1927 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1928 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1929 struct list_head
*mac_list
=
1930 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1931 struct mac_res
*res
, *tmp
;
1933 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1934 if (res
->smac_index
== smac_index
&& res
->port
== (u8
) port
) {
1942 static int mac_add_to_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
, int port
, u8 smac_index
)
1944 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1945 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1946 struct list_head
*mac_list
=
1947 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1948 struct mac_res
*res
, *tmp
;
1950 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1951 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1952 /* mac found. update ref count */
1958 if (mlx4_grant_resource(dev
, slave
, RES_MAC
, 1, port
))
1960 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
1962 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1966 res
->port
= (u8
) port
;
1967 res
->smac_index
= smac_index
;
1969 list_add_tail(&res
->list
,
1970 &tracker
->slave_list
[slave
].res_list
[RES_MAC
]);
1974 static void mac_del_from_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
,
1977 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1978 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1979 struct list_head
*mac_list
=
1980 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1981 struct mac_res
*res
, *tmp
;
1983 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1984 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1985 if (!--res
->ref_count
) {
1986 list_del(&res
->list
);
1987 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1995 static void rem_slave_macs(struct mlx4_dev
*dev
, int slave
)
1997 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1998 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1999 struct list_head
*mac_list
=
2000 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
2001 struct mac_res
*res
, *tmp
;
2004 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
2005 list_del(&res
->list
);
2006 /* dereference the mac the num times the slave referenced it */
2007 for (i
= 0; i
< res
->ref_count
; i
++)
2008 __mlx4_unregister_mac(dev
, res
->port
, res
->mac
);
2009 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, res
->port
);
2014 static int mac_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2015 u64 in_param
, u64
*out_param
, int in_port
)
2022 if (op
!= RES_OP_RESERVE_AND_MAP
)
2025 port
= !in_port
? get_param_l(out_param
) : in_port
;
2026 port
= mlx4_slave_convert_port(
2033 err
= __mlx4_register_mac(dev
, port
, mac
);
2036 set_param_l(out_param
, err
);
2041 err
= mac_add_to_slave(dev
, slave
, mac
, port
, smac_index
);
2043 __mlx4_unregister_mac(dev
, port
, mac
);
2048 static int vlan_add_to_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2049 int port
, int vlan_index
)
2051 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2052 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2053 struct list_head
*vlan_list
=
2054 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2055 struct vlan_res
*res
, *tmp
;
2057 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2058 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2059 /* vlan found. update ref count */
2065 if (mlx4_grant_resource(dev
, slave
, RES_VLAN
, 1, port
))
2067 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
2069 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, port
);
2073 res
->port
= (u8
) port
;
2074 res
->vlan_index
= vlan_index
;
2076 list_add_tail(&res
->list
,
2077 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
]);
2082 static void vlan_del_from_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
2085 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2086 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2087 struct list_head
*vlan_list
=
2088 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2089 struct vlan_res
*res
, *tmp
;
2091 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2092 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
2093 if (!--res
->ref_count
) {
2094 list_del(&res
->list
);
2095 mlx4_release_resource(dev
, slave
, RES_VLAN
,
2104 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
)
2106 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2107 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2108 struct list_head
*vlan_list
=
2109 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
2110 struct vlan_res
*res
, *tmp
;
2113 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
2114 list_del(&res
->list
);
2115 /* dereference the vlan the num times the slave referenced it */
2116 for (i
= 0; i
< res
->ref_count
; i
++)
2117 __mlx4_unregister_vlan(dev
, res
->port
, res
->vlan
);
2118 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, res
->port
);
2123 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2124 u64 in_param
, u64
*out_param
, int in_port
)
2126 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2127 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2133 port
= !in_port
? get_param_l(out_param
) : in_port
;
2135 if (!port
|| op
!= RES_OP_RESERVE_AND_MAP
)
2138 port
= mlx4_slave_convert_port(
2143 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2144 if (!in_port
&& port
> 0 && port
<= dev
->caps
.num_ports
) {
2145 slave_state
[slave
].old_vlan_api
= true;
2149 vlan
= (u16
) in_param
;
2151 err
= __mlx4_register_vlan(dev
, port
, vlan
, &vlan_index
);
2153 set_param_l(out_param
, (u32
) vlan_index
);
2154 err
= vlan_add_to_slave(dev
, slave
, vlan
, port
, vlan_index
);
2156 __mlx4_unregister_vlan(dev
, port
, vlan
);
2161 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2162 u64 in_param
, u64
*out_param
, int port
)
2167 if (op
!= RES_OP_RESERVE
)
2170 err
= mlx4_grant_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2174 err
= __mlx4_counter_alloc(dev
, &index
);
2176 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2180 err
= add_res_range(dev
, slave
, index
, 1, RES_COUNTER
, port
);
2182 __mlx4_counter_free(dev
, index
);
2183 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2185 set_param_l(out_param
, index
);
2191 static int xrcdn_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2192 u64 in_param
, u64
*out_param
)
2197 if (op
!= RES_OP_RESERVE
)
2200 err
= __mlx4_xrcd_alloc(dev
, &xrcdn
);
2204 err
= add_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2206 __mlx4_xrcd_free(dev
, xrcdn
);
2208 set_param_l(out_param
, xrcdn
);
2213 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2214 struct mlx4_vhcr
*vhcr
,
2215 struct mlx4_cmd_mailbox
*inbox
,
2216 struct mlx4_cmd_mailbox
*outbox
,
2217 struct mlx4_cmd_info
*cmd
)
2220 int alop
= vhcr
->op_modifier
;
2222 switch (vhcr
->in_modifier
& 0xFF) {
2224 err
= qp_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2225 vhcr
->in_param
, &vhcr
->out_param
);
2229 err
= mtt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2230 vhcr
->in_param
, &vhcr
->out_param
);
2234 err
= mpt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2235 vhcr
->in_param
, &vhcr
->out_param
);
2239 err
= cq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2240 vhcr
->in_param
, &vhcr
->out_param
);
2244 err
= srq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2245 vhcr
->in_param
, &vhcr
->out_param
);
2249 err
= mac_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2250 vhcr
->in_param
, &vhcr
->out_param
,
2251 (vhcr
->in_modifier
>> 8) & 0xFF);
2255 err
= vlan_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2256 vhcr
->in_param
, &vhcr
->out_param
,
2257 (vhcr
->in_modifier
>> 8) & 0xFF);
2261 err
= counter_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2262 vhcr
->in_param
, &vhcr
->out_param
, 0);
2266 err
= xrcdn_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2267 vhcr
->in_param
, &vhcr
->out_param
);
2278 static int qp_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2287 case RES_OP_RESERVE
:
2288 base
= get_param_l(&in_param
) & 0x7fffff;
2289 count
= get_param_h(&in_param
);
2290 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
2293 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
2294 __mlx4_qp_release_range(dev
, base
, count
);
2296 case RES_OP_MAP_ICM
:
2297 qpn
= get_param_l(&in_param
) & 0x7fffff;
2298 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
2303 if (!fw_reserved(dev
, qpn
))
2304 __mlx4_qp_free_icm(dev
, qpn
);
2306 res_end_move(dev
, slave
, RES_QP
, qpn
);
2308 if (valid_reserved(dev
, slave
, qpn
))
2309 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
2318 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2319 u64 in_param
, u64
*out_param
)
2325 if (op
!= RES_OP_RESERVE_AND_MAP
)
2328 base
= get_param_l(&in_param
);
2329 order
= get_param_h(&in_param
);
2330 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
2332 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
2333 __mlx4_free_mtt_range(dev
, base
, order
);
2338 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2344 struct res_mpt
*mpt
;
2347 case RES_OP_RESERVE
:
2348 index
= get_param_l(&in_param
);
2349 id
= index
& mpt_mask(dev
);
2350 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2354 put_res(dev
, slave
, id
, RES_MPT
);
2356 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
2359 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
2360 __mlx4_mpt_release(dev
, index
);
2362 case RES_OP_MAP_ICM
:
2363 index
= get_param_l(&in_param
);
2364 id
= index
& mpt_mask(dev
);
2365 err
= mr_res_start_move_to(dev
, slave
, id
,
2366 RES_MPT_RESERVED
, &mpt
);
2370 __mlx4_mpt_free_icm(dev
, mpt
->key
);
2371 res_end_move(dev
, slave
, RES_MPT
, id
);
2381 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2382 u64 in_param
, u64
*out_param
)
2388 case RES_OP_RESERVE_AND_MAP
:
2389 cqn
= get_param_l(&in_param
);
2390 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2394 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2395 __mlx4_cq_free_icm(dev
, cqn
);
2406 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2407 u64 in_param
, u64
*out_param
)
2413 case RES_OP_RESERVE_AND_MAP
:
2414 srqn
= get_param_l(&in_param
);
2415 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2419 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2420 __mlx4_srq_free_icm(dev
, srqn
);
2431 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2432 u64 in_param
, u64
*out_param
, int in_port
)
2438 case RES_OP_RESERVE_AND_MAP
:
2439 port
= !in_port
? get_param_l(out_param
) : in_port
;
2440 port
= mlx4_slave_convert_port(
2445 mac_del_from_slave(dev
, slave
, in_param
, port
);
2446 __mlx4_unregister_mac(dev
, port
, in_param
);
2457 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2458 u64 in_param
, u64
*out_param
, int port
)
2460 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2461 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2464 port
= mlx4_slave_convert_port(
2470 case RES_OP_RESERVE_AND_MAP
:
2471 if (slave_state
[slave
].old_vlan_api
)
2475 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2476 __mlx4_unregister_vlan(dev
, port
, in_param
);
2486 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2487 u64 in_param
, u64
*out_param
)
2492 if (op
!= RES_OP_RESERVE
)
2495 index
= get_param_l(&in_param
);
2496 if (index
== MLX4_SINK_COUNTER_INDEX(dev
))
2499 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2503 __mlx4_counter_free(dev
, index
);
2504 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2509 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2510 u64 in_param
, u64
*out_param
)
2515 if (op
!= RES_OP_RESERVE
)
2518 xrcdn
= get_param_l(&in_param
);
2519 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2523 __mlx4_xrcd_free(dev
, xrcdn
);
2528 int mlx4_FREE_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2529 struct mlx4_vhcr
*vhcr
,
2530 struct mlx4_cmd_mailbox
*inbox
,
2531 struct mlx4_cmd_mailbox
*outbox
,
2532 struct mlx4_cmd_info
*cmd
)
2535 int alop
= vhcr
->op_modifier
;
2537 switch (vhcr
->in_modifier
& 0xFF) {
2539 err
= qp_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2544 err
= mtt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2545 vhcr
->in_param
, &vhcr
->out_param
);
2549 err
= mpt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2554 err
= cq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2555 vhcr
->in_param
, &vhcr
->out_param
);
2559 err
= srq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2560 vhcr
->in_param
, &vhcr
->out_param
);
2564 err
= mac_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2565 vhcr
->in_param
, &vhcr
->out_param
,
2566 (vhcr
->in_modifier
>> 8) & 0xFF);
2570 err
= vlan_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2571 vhcr
->in_param
, &vhcr
->out_param
,
2572 (vhcr
->in_modifier
>> 8) & 0xFF);
2576 err
= counter_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2577 vhcr
->in_param
, &vhcr
->out_param
);
2581 err
= xrcdn_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2582 vhcr
->in_param
, &vhcr
->out_param
);
2590 /* ugly but other choices are uglier */
2591 static int mr_phys_mpt(struct mlx4_mpt_entry
*mpt
)
2593 return (be32_to_cpu(mpt
->flags
) >> 9) & 1;
2596 static int mr_get_mtt_addr(struct mlx4_mpt_entry
*mpt
)
2598 return (int)be64_to_cpu(mpt
->mtt_addr
) & 0xfffffff8;
2601 static int mr_get_mtt_size(struct mlx4_mpt_entry
*mpt
)
2603 return be32_to_cpu(mpt
->mtt_sz
);
2606 static u32
mr_get_pd(struct mlx4_mpt_entry
*mpt
)
2608 return be32_to_cpu(mpt
->pd_flags
) & 0x00ffffff;
2611 static int mr_is_fmr(struct mlx4_mpt_entry
*mpt
)
2613 return be32_to_cpu(mpt
->pd_flags
) & MLX4_MPT_PD_FLAG_FAST_REG
;
2616 static int mr_is_bind_enabled(struct mlx4_mpt_entry
*mpt
)
2618 return be32_to_cpu(mpt
->flags
) & MLX4_MPT_FLAG_BIND_ENABLE
;
2621 static int mr_is_region(struct mlx4_mpt_entry
*mpt
)
2623 return be32_to_cpu(mpt
->flags
) & MLX4_MPT_FLAG_REGION
;
2626 static int qp_get_mtt_addr(struct mlx4_qp_context
*qpc
)
2628 return be32_to_cpu(qpc
->mtt_base_addr_l
) & 0xfffffff8;
2631 static int srq_get_mtt_addr(struct mlx4_srq_context
*srqc
)
2633 return be32_to_cpu(srqc
->mtt_base_addr_l
) & 0xfffffff8;
2636 static int qp_get_mtt_size(struct mlx4_qp_context
*qpc
)
2638 int page_shift
= (qpc
->log_page_size
& 0x3f) + 12;
2639 int log_sq_size
= (qpc
->sq_size_stride
>> 3) & 0xf;
2640 int log_sq_sride
= qpc
->sq_size_stride
& 7;
2641 int log_rq_size
= (qpc
->rq_size_stride
>> 3) & 0xf;
2642 int log_rq_stride
= qpc
->rq_size_stride
& 7;
2643 int srq
= (be32_to_cpu(qpc
->srqn
) >> 24) & 1;
2644 int rss
= (be32_to_cpu(qpc
->flags
) >> 13) & 1;
2645 u32 ts
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
2646 int xrc
= (ts
== MLX4_QP_ST_XRC
) ? 1 : 0;
2651 int page_offset
= (be32_to_cpu(qpc
->params2
) >> 6) & 0x3f;
2653 sq_size
= 1 << (log_sq_size
+ log_sq_sride
+ 4);
2654 rq_size
= (srq
|rss
|xrc
) ? 0 : (1 << (log_rq_size
+ log_rq_stride
+ 4));
2655 total_mem
= sq_size
+ rq_size
;
2657 roundup_pow_of_two((total_mem
+ (page_offset
<< 6)) >>
2663 static int check_mtt_range(struct mlx4_dev
*dev
, int slave
, int start
,
2664 int size
, struct res_mtt
*mtt
)
2666 int res_start
= mtt
->com
.res_id
;
2667 int res_size
= (1 << mtt
->order
);
2669 if (start
< res_start
|| start
+ size
> res_start
+ res_size
)
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);
		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;
	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	u8 get = vhcr->op_modifier;

	if (get != 1)
		return -EPERM;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	u32 qpn;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx	= inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	if (slave != mlx4_master_func_num(dev)) {
		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
		/* setting QP rate-limit is disallowed for VFs */
		if (qp_ctx->rate_limit_params)
			return -EPERM;
	}

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;

	case MLX4_QP_ST_MLX:
		qpn = vhcr->in_modifier & 0x7fffff;
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (transition == QP_TRANS_INIT2RTR &&
		    slave != mlx4_master_func_num(dev) &&
		    mlx4_is_qp_reserved(dev, qpn) &&
		    !mlx4_vf_smi_enabled(dev, slave, port)) {
			/* only enabled VFs may create MLX proxy QPs */
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
			return -EPERM;
		}
		break;

	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	/* check for slave valid, slave not PF, and slave active */
	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return 0;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 10) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq = NULL;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq = NULL;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq = NULL;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq = NULL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox)
{
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
	u8 pri_sched_queue;
	int port = mlx4_slave_convert_port(
		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

	if (port < 0)
		return -EINVAL;

	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
			  ((port & 1) << 6);

	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
		qpc->pri_path.sched_queue = pri_sched_queue;
	}

	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
		port = mlx4_slave_convert_port(
				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
				+ 1) - 1;
		if (port < 0)
			return -EINVAL;
		qpc->alt_path.sched_queue =
			(qpc->alt_path.sched_queue & ~(1 << 6)) |
			(port & 1) << 6;
	}
	return 0;
}
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}
	return 0;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	if (roce_verify_mac(dev, slave, qpc, inbox))
		return -EINVAL;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3 = orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx = orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index = orig_vlan_index;
		qp->feup = orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);

	if (err)
		return err;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (port < 0)
			return port;
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,
						 reg_id);
	}
	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH) {
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
			if (port < 0)
				return port;
			gid[5] = port;
		}
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}
static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
			    u8 *gid, enum mlx4_protocol prot)
{
	int real_port;

	if (prot != MLX4_PROT_ETH)
		return 0;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (real_port < 0)
			return -EINVAL;
		gid[5] = real_port;
	}

	return 0;
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = mlx4_adjust_port(dev, slave, gid, prot);
		if (err)
			goto ex_put;

		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a mac address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac*/
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}
static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
					 struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
			next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}
/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd_info)
{
	int err;
	u32 qpn = vhcr->in_modifier & 0xffffff;
	struct res_qp *rqp;
	u64 mac;
	unsigned port;
	u64 pri_addr_path_mask;
	struct mlx4_update_qp_context *cmd;
	int smac_index;

	cmd = (struct mlx4_update_qp_context *)inbox->buf;

	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	/* Just change the smac for the QP */
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
		return err;
	}

	port = (rqp->sched_queue >> 6 & 1) + 1;

	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
		smac_index = cmd->qp_context.pri_path.grh_mylmc;
		err = mac_find_smac_ix_in_slave(dev, slave, port,
						smac_index, &mac);
		if (err) {
			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
				 qpn, smac_index);
			goto err_mac;
		}
	}

	err = mlx4_cmd(dev, inbox->dma,
		       vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err) {
		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
		goto err_mac;
	}

err_mac:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (ctrl->port <= 0)
		return -EINVAL;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
		handle_eth_header_mcast_prio(ctrl, rule_header);

	if (slave == dev->caps.function)
		goto execute;

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

execute:
	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}