/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
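/*
 * Usage note (illustrative, not part of the original source): QoS
 * support is off by default and can be enabled at module load time,
 * e.g. "modprobe mlx4_core enable_qos=1".  The flag is consumed in
 * mlx4_INIT_HCA() below, where it sets bit 2 of the INIT_HCA flags
 * word.
 */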
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
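/*
 * Illustrative sketch (not part of the original source): both macros
 * dispatch on the sizeof() of the CPU-side operand, so the byte-order
 * conversion is picked automatically:
 *
 *	u16 val;
 *	MLX4_GET(val, mailbox->buf, 0x02);	// val = be16_to_cpup(buf + 2)
 *	MLX4_PUT(mailbox->buf, val, 0x02);	// *(__be16 *)(buf + 2) = cpu_to_be16(val)
 *
 * An operand whose size is not 1, 2, 4 or 8 bytes leaves a reference to
 * the deliberately undefined __buggy_use_of_MLX4_GET/PUT symbols,
 * turning the mistake into a link-time error.
 */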
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[10] = "VMM",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support"
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags2:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
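/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * a caller that wants the FW to pick its page size would fill the
 * config and issue the command like so:
 *
 *	struct mlx4_mod_stat_cfg cfg = { .log_pg_sz_m = 1, .log_pg_sz = 0 };
 *	err = mlx4_MOD_STAT_CFG(dev, &cfg);
 */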
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size, proxy_qp, qkey;
	int	err = 0;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
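		/*
		 * Worked example (illustrative, not from the original
		 * source): each function owns a block of 8 special QPs
		 * past the base.  For slave 2 on port 1 with
		 * base_proxy_sqpn == 0x40, the proxy QP0 is
		 * 0x40 + 8 * 2 + 1 - 1 = 0x50; QP1's proxy (written
		 * below as proxy_qp + 2) is 0x52, and the tunnel QPs are
		 * laid out the same way from base_tunnel_sqpn.
		 */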
		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		size = dev->caps.num_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);

		size = dev->caps.reserved_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

	} else
		err = -EINVAL;

	return err;
}
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u8			field, op_modifier;
	u32			size, qkey;
	int			err = 0, quotas = 0;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		goto out;
	}
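	/*
	 * Note (illustrative, not from the original source): the quota,
	 * max_eq and reserved_eq words above, like the QPN words read
	 * below, carry their value in the low 24 bits of a 32-bit field,
	 * hence the "& 0xFFFFFF" masking; e.g. a raw word of 0x01000080
	 * yields a quota of 0x80.
	 */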
	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE		       0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
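	/*
	 * Illustrative note (not from the original source): most fields
	 * fetched below are log2-encoded in a few low bits of a byte, so
	 * decoding is "1 << (field & mask)".  For example, a
	 * QUERY_DEV_CAP_MAX_QP_OFFSET byte of 0x57 gives
	 * max_qps = 1 << (0x57 & 0x1f) = 1 << 23, i.e. 8M QPs.
	 */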
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;

	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1<<6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1<<3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i]	   = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->ib_mtu[i]	   = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field >> 4);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i]  = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);
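	/*
	 * Worked example (illustrative, not from the original source):
	 * with reserved_uars == 8, the doorbells of EQs 0-31 live on
	 * reserved UAR pages, so reserved_eqs is raised to at least
	 * 8 * 4 = 32 here even if the firmware itself reserved fewer
	 * EQs.
	 */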
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u32	bmme_flags;
	int	real_port;
	int	slave_port;
	int	first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	return 0;
}
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
			    (port & 0xFF);

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u8			field;
	int			err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
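		/*
		 * Worked example (illustrative, not from the original
		 * source): a chunk at address 0x230000 with size 0x50000
		 * gives 0x230000 | 0x50000 = 0x270000, whose lowest set
		 * bit is bit 16, so lg = 16 and the chunk is handed to
		 * the FW as 64KB-aligned pages.
		 */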
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B,
						MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}


int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);

	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);
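	/*
	 * Worked example (illustrative, not from the original source):
	 * for FW 2.11.5000 the raw word carries the major (2) in bits
	 * 47:32, the subminor (5000) in bits 31:16 and the minor (11) in
	 * bits 15:0; after the swap, dev->caps.fw_ver decodes as
	 * major = fw_ver >> 32, minor = (fw_ver >> 16) & 0xffff and
	 * subminor = fw_ver & 0xffff, matching the "%d.%d.%03d" prints
	 * below.
	 */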
	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
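	/*
	 * Worked example (illustrative, not from the original source):
	 * ICM pages are 4KB, so on a system with 64KB pages
	 * (PAGE_SIZE / MLX4_ICM_PAGE_SIZE == 16) a FW size of 100 ICM
	 * pages is first rounded up to 112 and then shifted down by 4,
	 * yielding 7 system pages.
	 */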
	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
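/*
 * Illustrative example (not from the original source): if the firmware
 * stored a word of the ID string "MT_0" byte-swapped as "0_TM", the
 * swab32() above reverses the four bytes of that word and recovers
 * "MT_0"; the full board ID is rebuilt this way one 32-bit word at a
 * time.
 */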
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define	 INIT_HCA_VERSION		 2
#define INIT_HCA_VXLAN_OFFSET		 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define  INIT_HCA_FS_PARAM_OFFSET	 0x1d0
#define  INIT_HCA_FS_BASE_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define  INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define  INIT_HCA_FS_IB_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}
	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}
	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,	INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;
		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	u8 byte_field;
	int err;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}
	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz, outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-byte EQE enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-byte CQE enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz,  outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
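
/*
 * Note: QUERY_HCA deliberately reuses the INIT_HCA_*_OFFSET constants.
 * The firmware reports the HCA context in the same layout that INIT_HCA
 * consumed, so each MLX4_GET() above simply reverses the corresponding
 * MLX4_PUT() from mlx4_INIT_HCA().
 */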
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}
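
/*
 * Background (editorial note): under SRIOV the master paravirtualizes
 * QP0, the subnet-management QP, on behalf of the slaves. Both the
 * proxy QP0 and the master's real QP0 must therefore be up before the
 * physical INIT_PORT may be issued on an IB port; see
 * mlx4_INIT_PORT_wrapper() below.
 */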
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
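
/*
 * Reference-counting sketch (illustrative): the physical INIT_PORT is
 * issued at most once per port. With three slaves sharing Ethernet
 * port p:
 *
 *	slave A: init_port_ref[p] 0 -> 1, INIT_PORT sent to firmware
 *	slave B: init_port_ref[p] 1 -> 2, no command issued
 *	slave C: init_port_ref[p] 2 -> 3, no command issued
 *
 * Each slave additionally records its own interest in
 * slave_state[].init_port_mask; mlx4_CLOSE_PORT_wrapper() undoes both
 * symmetrically.
 */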
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE		256
#define INIT_PORT_FLAGS_OFFSET		0x00
#define INIT_PORT_FLAG_SIG		(1 << 18)
#define INIT_PORT_FLAG_NG		(1 << 17)
#define INIT_PORT_FLAG_G0		(1 << 16)
#define INIT_PORT_VL_SHIFT		4
#define INIT_PORT_PORT_WIDTH_SHIFT	8
#define INIT_PORT_MTU_OFFSET		0x04
#define INIT_PORT_MAX_GID_OFFSET	0x06
#define INIT_PORT_MAX_PKEY_OFFSET	0x0a
#define INIT_PORT_GUID0_OFFSET		0x10
#define INIT_PORT_NODE_GUID_OFFSET	0x18
#define INIT_PORT_SI_GUID_OFFSET	0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) <<
			INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
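
/*
 * Layout of the old-style INIT_PORT flags word, as implied by the
 * shifts and flag bits defined above (sketch):
 *
 *	bits  4..7	VL capability		(INIT_PORT_VL_SHIFT)
 *	bits  8..11	port width capability	(INIT_PORT_PORT_WIDTH_SHIFT)
 *	bits 16/17/18	G0/NG/SIG		(presumably "valid" bits for
 *						 the GUID0/node-GUID/system-
 *						 image-GUID fields; left at
 *						 zero here)
 */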
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	      (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}
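
/*
 * Note: the physical CLOSE_PORT is only issued when the last reference
 * is about to drop (init_port_ref[port] == 1) on an Ethernet port, or
 * once QP0 is no longer active on an InfiniBand port; otherwise only
 * the slave's bit in init_port_mask is cleared.
 */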
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
	__be32	update_flags;
	__be32	rsvd1[3];
	__be16	vxlan_udp_dport;
	__be16	rsvd2;
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)

static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
	config_dev.vxlan_udp_dport = udp_port;

	return mlx4_CONFIG_DEV(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
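
/*
 * Usage sketch (illustrative, not upstream code): a tunnel driver that
 * wants VXLAN offload would typically push the IANA-assigned VXLAN UDP
 * port, 4789:
 *
 *	err = mlx4_config_vxlan_port(dev, htons(4789));
 *
 * update_flags tells the firmware which CONFIG_DEV fields to apply;
 * MLX4_VXLAN_UDP_DPORT is the only flag defined here.
 */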
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
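
/*
 * Worked example (illustrative): the firmware counts auxiliary ICM
 * space in 4 KB ICM pages (MLX4_ICM_PAGE_SHIFT == 12). On a system
 * with 64 KB pages (PAGE_SHIFT == 16), PAGE_SIZE / MLX4_ICM_PAGE_SIZE
 * is 16, so *aux_pages == 5 becomes ALIGN(5, 16) >> 4 == 16 >> 4 == 1
 * system page -- rounding up instead of truncating to zero.
 */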
int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0x14
#define MOD_STAT_CFG_GUID_L	 0x1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to get port %d uplink guid\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
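
/*
 * Note: the 64-bit physical port ID is assembled from two 32-bit
 * big-endian words of the MOD_STAT_CFG response (offsets 0x14 and
 * 0x1c), with guid_hi placed in the upper half:
 * (u64)guid_hi << 32 | guid_lo.
 */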
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
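
/*
 * Note on the input modifier: MOD_STAT_CFG multiplexes several setup
 * areas. Bits 31:28 select the area (5 == WoL setup, per
 * MLX4_WOL_SETUP_MODE) and bits 15:8 carry the port number; the op
 * modifier (0x3 vs 0x1) presumably selects read vs write access.
 */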
void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	u8 rem_mcg;
	u8 prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err |
					(__force u32)cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}
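
/*
 * Note on the acknowledgment above: the driver reports the outcome of
 * each required operation back through GET_OP_REQ with op modifier 1,
 * packing the status into the low bits of the input modifier and the
 * (byte-swapped) request token into the upper 16 bits, so the firmware
 * can match the reply to the outstanding request.
 */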