/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
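
/*
 * Both macros derive the access width from sizeof() of the C variable, so
 * callers only declare the field with the right type and let the macro do
 * the big-endian conversion, e.g. (illustrative, mirroring the QUERY_*
 * helpers below):
 *
 *	u8 field;
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 *	dev_cap->max_qps = 1 << (field & 0x1f);
 *
 * A destination/source whose size is not 1, 2, 4 or 8 bytes calls one of
 * the never-defined __buggy_use_of_MLX4_* symbols and so fails at link time.
 */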
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
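
/*
 * QUERY_FUNC_CAP is paravirtualized under SR-IOV: the wrapper below runs
 * on the PF on behalf of a VF (slave).  With opcode modifier 0 it reports
 * function-wide resource quotas taken from the PF resource tracker; with
 * opcode modifier 1 it reports per-port data (proxy/tunnel QP numbers,
 * the privileged QP0 qkey and the physical port id) for the port given in
 * the input modifier.
 */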
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size, proxy_qp, qkey;
	int err = 0;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		size = dev->caps.num_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);

		size = dev->caps.reserved_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
	} else
		err = -EINVAL;

	return err;
}
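
/*
 * VF-side counterpart of the wrapper above: the slave issues a wrapped
 * QUERY_FUNC_CAP and decodes either the general capabilities
 * (gen_or_port == 0) or the per-port information into struct
 * mlx4_func_cap.  When the PF advertises QUERY_FUNC_CAP_FLAG_QUOTAS the
 * per-function quotas are read from the new offsets, otherwise from the
 * deprecated (*_DEP) ones.
 */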
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
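
/*
 * QUERY_DEV_CAP returns a single 0x100-byte block of device-wide limits.
 * Each field is fetched with MLX4_GET() at a fixed offset; most limits are
 * encoded as log2 values, hence the "1 << (field & mask)" decoding below.
 */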
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3 << 6;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;

	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1 << 6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1 << 3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i] = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->ib_mtu[i] = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET			0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i] = 1 << (field >> 4);
			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i] = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
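
/*
 * Slave view of QUERY_DEV_CAP: the wrapper below runs the real command and
 * then edits the result before it is returned to a guest - it forces the
 * port-management-change-event bit on, hides MW type 1, remaps the
 * per-port WOL bits to the slave's port numbering, and masks out
 * timestamping, VXLAN offloads, BlueFlame, MW type 2 and (when not
 * enabled) device-managed flow steering.
 */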
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;
	u32 bmme_flags;
	int real_port;
	int slave_port;
	int first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	return 0;
}
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
			    (port & 0xFF);

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
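
/*
 * mlx4_map_cmd() feeds MAP_FA/MAP_ICM/MAP_ICM_AUX with (virtual, physical)
 * page entries.  Each mailbox entry is a pair of big-endian 64-bit words,
 * with the low bits of the physical word encoding log2(chunk size) relative
 * to the ICM page size (MLX4_ICM_PAGE_SIZE); every chunk must therefore be
 * naturally aligned to its size, and up to MLX4_MAILBOX_SIZE / 16 entries
 * are flushed to the firmware per command.
 */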
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B,
						MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}


int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
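
/*
 * In the raw QUERY_FW output the major revision sits above bit 32 while
 * the subminor occupies the more significant 16-bit half of the low word;
 * mlx4_QUERY_FW() below swaps the two low halves so that dev->caps.fw_ver
 * can be printed as major.minor.subminor with plain shifts (see the
 * "%d.%d.%03d" messages).
 */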
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_PPF_ID			0x09
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

#define QUERY_FW_COMM_BASE_OFFSET	0x40
#define QUERY_FW_COMM_BAR_OFFSET	0x48

#define QUERY_FW_CLOCK_OFFSET		0x50
#define QUERY_FW_CLOCK_BAR		0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
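
/*
 * The board id lives in the VSD returned by QUERY_ADAPTER.  Topspin/Cisco
 * boards are detected by two signature words and carry a plain string;
 * Mellanox boards store the id as 32-bit words that the firmware has
 * byte-swapped, so get_board_id() swabs them back before use.
 */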
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE		0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET	0x10
#define QUERY_ADAPTER_VSD_OFFSET	0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
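
/*
 * INIT_HCA takes a 0x200-byte parameter block: global flags, the ICM base
 * addresses and log-sizes of the QPC/SRQC/CQC/EQC/RDMARC context tables,
 * the multicast (or flow-steering) table geometry, the TPT (MPT/MTT)
 * layout and the UAR page size.  QUERY_HCA further below reads the same
 * offsets back, which is how a slave learns the parameters chosen by the
 * PF.
 */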
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define INIT_HCA_VERSION		 2
#define INIT_HCA_VXLAN_OFFSET		 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define INIT_HCA_MC_BASE_OFFSET		 (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define INIT_HCA_FS_PARAM_OFFSET	 0x1d0
#define INIT_HCA_FS_BASE_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define INIT_HCA_FS_IB_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;
		MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
1536 int mlx4_QUERY_HCA(struct mlx4_dev
*dev
,
1537 struct mlx4_init_hca_param
*param
)
1539 struct mlx4_cmd_mailbox
*mailbox
;
1545 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1546 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1548 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
1549 if (IS_ERR(mailbox
))
1550 return PTR_ERR(mailbox
);
1551 outbox
= mailbox
->buf
;
1553 err
= mlx4_cmd_box(dev
, 0, mailbox
->dma
, 0, 0,
1555 MLX4_CMD_TIME_CLASS_B
,
1556 !mlx4_is_slave(dev
));
1560 MLX4_GET(param
->global_caps
, outbox
, QUERY_HCA_GLOBAL_CAPS_OFFSET
);
1561 MLX4_GET(param
->hca_core_clock
, outbox
, QUERY_HCA_CORE_CLOCK_OFFSET
);
1563 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1565 MLX4_GET(param
->qpc_base
, outbox
, INIT_HCA_QPC_BASE_OFFSET
);
1566 MLX4_GET(param
->log_num_qps
, outbox
, INIT_HCA_LOG_QP_OFFSET
);
1567 MLX4_GET(param
->srqc_base
, outbox
, INIT_HCA_SRQC_BASE_OFFSET
);
1568 MLX4_GET(param
->log_num_srqs
, outbox
, INIT_HCA_LOG_SRQ_OFFSET
);
1569 MLX4_GET(param
->cqc_base
, outbox
, INIT_HCA_CQC_BASE_OFFSET
);
1570 MLX4_GET(param
->log_num_cqs
, outbox
, INIT_HCA_LOG_CQ_OFFSET
);
1571 MLX4_GET(param
->altc_base
, outbox
, INIT_HCA_ALTC_BASE_OFFSET
);
1572 MLX4_GET(param
->auxc_base
, outbox
, INIT_HCA_AUXC_BASE_OFFSET
);
1573 MLX4_GET(param
->eqc_base
, outbox
, INIT_HCA_EQC_BASE_OFFSET
);
1574 MLX4_GET(param
->log_num_eqs
, outbox
, INIT_HCA_LOG_EQ_OFFSET
);
1575 MLX4_GET(param
->rdmarc_base
, outbox
, INIT_HCA_RDMARC_BASE_OFFSET
);
1576 MLX4_GET(param
->log_rd_per_qp
, outbox
, INIT_HCA_LOG_RD_OFFSET
);
1578 MLX4_GET(dword_field
, outbox
, INIT_HCA_FLAGS_OFFSET
);
1579 if (dword_field
& (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN
)) {
1580 param
->steering_mode
= MLX4_STEERING_MODE_DEVICE_MANAGED
;
1582 MLX4_GET(byte_field
, outbox
, INIT_HCA_UC_STEERING_OFFSET
);
1583 if (byte_field
& 0x8)
1584 param
->steering_mode
= MLX4_STEERING_MODE_B0
;
1586 param
->steering_mode
= MLX4_STEERING_MODE_A0
;
	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz,  outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
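	/*
	 * Bit layout (illustration): in the byte read from
	 * INIT_HCA_EQE_CQE_OFFSETS, bit 5 (0x20) reports 64-byte EQEs and
	 * bit 6 (0x40) reports 64-byte CQEs, so a value of 0x60 would mean
	 * both stride extensions are enabled.
	 */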
	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz,  outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
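/*
 * Example sequence (hypothetical, Ethernet port): if two slaves send
 * INIT_PORT for the same port, init_port_ref goes 0 -> 1 -> 2 but the
 * hardware MLX4_CMD_INIT_PORT is issued only on the first transition;
 * each slave's bit in init_port_mask records that it holds the port open.
 */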
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE		256
#define INIT_PORT_FLAGS_OFFSET		0x00
#define INIT_PORT_FLAG_SIG		(1 << 18)
#define INIT_PORT_FLAG_NG		(1 << 17)
#define INIT_PORT_FLAG_G0		(1 << 16)
#define INIT_PORT_VL_SHIFT		4
#define INIT_PORT_PORT_WIDTH_SHIFT	8
#define INIT_PORT_MTU_OFFSET		0x04
#define INIT_PORT_MAX_GID_OFFSET	0x06
#define INIT_PORT_MAX_PKEY_OFFSET	0x0a
#define INIT_PORT_GUID0_OFFSET		0x10
#define INIT_PORT_NODE_GUID_OFFSET	0x18
#define INIT_PORT_SI_GUID_OFFSET	0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
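/*
 * Worked example (hypothetical capability values): with vl_cap[port] = 4 and
 * port_width_cap[port] = 3, the legacy INIT_PORT flags word is
 *
 *	flags = (4 & 0xf) << INIT_PORT_VL_SHIFT |
 *		(3 & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT = 0x340;
 *
 * and an ib_mtu_cap[port] of 5 gives an MTU field of 128 << 5 = 4096 bytes.
 */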
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	      (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
	__be32	update_flags;
	__be32	rsvd1[3];
	__be16	vxlan_udp_dport;
	__be16	rsvd2;
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)
static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
	config_dev.vxlan_udp_dport = udp_port;

	return mlx4_CONFIG_DEV(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
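/*
 * Usage sketch (illustrative, not a call made in this file): a caller that
 * wants the parser to match the IANA-assigned VXLAN port could do
 *
 *	err = mlx4_config_vxlan_port(dev, htons(4789));
 *
 * the port is passed in network byte order to match the __be16
 * vxlan_udp_dport field copied into the CONFIG_DEV mailbox.
 */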
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
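/*
 * Worked example (hypothetical page sizes): with 64 KB system pages
 * (PAGE_SHIFT = 16) and 4 KB ICM pages (MLX4_ICM_PAGE_SHIFT = 12), a
 * firmware answer of 100 ICM pages becomes
 *
 *	ALIGN(100, 65536 / 4096) >> (16 - 12) = ALIGN(100, 16) >> 4
 *					      = 112 >> 4 = 7 system pages.
 */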
int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0X14
#define MOD_STAT_CFG_GUID_L	 0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Fail to get port %d uplink guid\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
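/*
 * Input-modifier layout (illustration): MLX4_WOL_SETUP_MODE occupies the top
 * nibble and the port number sits at bits 8..15, so for port 1
 *
 *	in_mod = (5 << 28) | (1 << 8) = 0x50000100,
 *
 * with op_modifier 0x3 used above for the read and 0x1 for the write.
 */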
enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	u8 rem_mcg;
	u8 prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			return;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err |
					(__force u32)cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}
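/*
 * Decode example (hypothetical word, assuming MGM_QPN_MASK keeps the low
 * 24 bits): if mgm->members_count holds the big-endian value 0x41000003,
 * its first byte is 0x41, so
 *
 *	rem_mcg = 0x41 & 1  = 1	(a detach request),
 *	prot	= 0x41 >> 6 = 1,
 *	num_qps = 0x41000003 & MGM_QPN_MASK = 3 QPs to walk.
 */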