/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
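
/*
 * Usage sketch (summary, not part of the original source): MLX4_GET/MLX4_PUT
 * pick the big-endian conversion purely from the operand size, e.g.
 *
 *	u16 field;
 *	MLX4_GET(field, outbox, 0x36);	// read a __be16 at byte offset 0x36
 *	MLX4_PUT(inbox, field, 0x04);	// write it back as __be16 at 0x04
 *
 * An operand whose size is not 1/2/4/8 bytes produces a link-time error via
 * the undefined __buggy_use_of_MLX4_* externs above.
 */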
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[38] = "Wake On LAN support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device manage flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "TCP/IP offloads/flow-steering for VXLAN support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field;
	u32	size;
	int	err = 0;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
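
	/* Note (summary, not in the original source): op_modifier == 1 fills
	 * the per-port section of the mailbox (proxy/tunnel QPNs, physical
	 * port id), while op_modifier == 0 fills the general section (flags
	 * and per-function quotas) that mlx4_QUERY_FUNC_CAP() parses below.
	 */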
	if (vhcr->op_modifier == 1) {
		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		field = vhcr->in_modifier; /* phys-port = logical-port */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	} else if (vhcr->op_modifier == 0) {
		/* enable rdma and ethernet interfaces, and new quota locations */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = dev->caps.num_ports;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		size = dev->caps.num_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);

		size = dev->caps.reserved_eqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
	} else
		err = -EINVAL;

	return err;
}
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32			*outbox;
	u8			field, op_modifier;
	u32			size;
	int			err = 0, quotas = 0;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
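
	/* Typical call pattern (sketch, not part of the original source):
	 * the slave driver first calls this with gen_or_port == 0 to learn
	 * its quotas, then once per port with gen_or_port == <port number>
	 * to learn the proxy/tunnel QPNs for that port.
	 */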
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;
#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
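
	/* Note (summary, not in the original source): the offsets above index
	 * into the QUERY_DEV_CAP output mailbox; each MLX4_GET() below pulls
	 * one field from mailbox->buf at the corresponding byte offset and
	 * converts it from big endian.
	 */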
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	} else {
		dev_cap->bf_reg_size = 0;
		mlx4_dbg(dev, "BlueFlame not available\n");
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;

	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & 1<<6)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & 1<<3)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		for (i = 1; i <= dev_cap->num_ports; ++i) {
			MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
			dev_cap->max_vl[i]	   = field >> 4;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
			dev_cap->ib_mtu[i]	   = field >> 4;
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
		}
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		for (i = 1; i <= dev_cap->num_ports; ++i) {
			err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
					   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
			if (err)
				goto out;

			MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
			dev_cap->supported_port_types[i] = field & 3;
			dev_cap->suggested_type[i] = (field >> 3) & 1;
			dev_cap->default_sense[i] = (field >> 4) & 1;
			MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
			dev_cap->ib_mtu[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
			dev_cap->max_port_width[i] = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
			dev_cap->max_gids[i]	   = 1 << (field >> 4);
			dev_cap->max_pkeys[i]	   = 1 << (field & 0xf);
			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
			dev_cap->max_vl[i]	   = field & 0xf;
			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
			dev_cap->log_max_macs[i]  = field & 0xf;
			dev_cap->log_max_vlans[i] = field >> 4;
			MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
			MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
			MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
			dev_cap->trans_type[i] = field32 >> 24;
			dev_cap->vendor_oui[i] = field32 & 0xffffff;
			MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
			MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
		}
	}

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
				    dev_cap->reserved_eqs);

	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
		 dev_cap->max_port_width[1]);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u32	bmme_flags;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xf7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	return 0;
}
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
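		/* Worked example (illustrative, not from the original source):
		 * a chunk at address 0x230000 of size 0x10000 gives
		 * ffs(0x230000 | 0x10000) - 1 = 16, so the chunk is mapped as
		 * 64 KB pages and the page-size field passed to FW below is
		 * lg - MLX4_ICM_PAGE_SHIFT = 4 (assuming the usual 4 KB
		 * MLX4_ICM_PAGE_SIZE).
		 */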
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				   MLX4_ICM_PAGE_SIZE,
				   (unsigned long long) mlx4_icm_addr(&iter),
				   mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B,
						MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			  tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}


int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET       0x0f
#define QUERY_FW_ERR_START_OFFSET     0x30
#define QUERY_FW_ERR_SIZE_OFFSET      0x38
#define QUERY_FW_ERR_BAR_OFFSET       0x3c

#define QUERY_FW_SIZE_OFFSET          0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET  0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET   0x28

#define QUERY_FW_COMM_BASE_OFFSET     0x40
#define QUERY_FW_COMM_BAR_OFFSET      0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
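
	/* Illustrative numbers (not part of the original source): with 4 KB
	 * ICM pages and 64 KB system pages, fw_pages is rounded up to a
	 * multiple of 16 ICM pages and then shifted right by 4, yielding the
	 * count in system pages.
	 */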
	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define	 INIT_HCA_VERSION		 2
#define INIT_HCA_VXLAN_OFFSET		 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
#define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define  INIT_HCA_FS_IB_BITS_OFFSET       (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
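
	/* Note (summary, not in the original source): this mailbox layout is
	 * shared with QUERY_HCA - mlx4_QUERY_HCA() below reads the same
	 * INIT_HCA_* offsets back out of the device.
	 */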
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		(ilog2(cache_line_size()) - 4) << 5;
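
	/* Example (illustrative, not from the original source): a 64-byte
	 * cache line gives ilog2(64) = 6, so the byte written above is
	 * (6 - 4) << 5 = 0x40.
	 */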
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,	INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;
		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	int err;
	u8 byte_field;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}
	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz,  outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
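
/*
 * Illustrative sketch only, not part of the original file: a consumer such
 * as mlx4_en brings a port up with mlx4_INIT_PORT() when the netdevice is
 * opened and pairs it with mlx4_CLOSE_PORT() on stop.  With SRIOV active
 * both calls are wrapped, so the per-slave masking and reference counting in
 * the wrappers applies.  The function name below is hypothetical.
 */
static int __maybe_unused mlx4_example_port_cycle(struct mlx4_dev *dev,
						  int port)
{
	int err = mlx4_INIT_PORT(dev, port);

	if (err)
		return err;

	/* ... per-port configuration would happen here ... */

	return mlx4_CLOSE_PORT(dev, port);
}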
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	      (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}
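
/*
 * Unit conversion done by mlx4_SET_ICM_SIZE() below: firmware reports the
 * auxiliary ICM area in 4 KB ICM pages (MLX4_ICM_PAGE_SHIFT == 12), while
 * the caller allocates system pages.  For example, on a host with 64 KB
 * pages (PAGE_SHIFT == 16), a report of 33 ICM pages is aligned up to 48
 * (the next multiple of 64K / 4K == 16) and then shifted right by 4,
 * yielding 3 system pages.
 */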
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0X14
#define MOD_STAT_CFG_GUID_L	 0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Fail to get port %d uplink guid\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
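
/*
 * Illustrative sketch only, not part of the original file: the in_mod word
 * above selects the WoL setup mode (5 << 28) and the port (port << 8), so a
 * consumer such as mlx4_en can read-modify-write the 64-bit WoL
 * configuration word for a port.  The function name and the set_bits
 * parameter below are hypothetical; the bit layout of the word belongs to
 * the consumer.
 */
static int __maybe_unused mlx4_example_update_wol(struct mlx4_dev *dev,
						  int port, u64 set_bits)
{
	u64 config = 0;
	int err = mlx4_wol_read(dev, &config, port);

	if (err)
		return err;
	config |= set_bits;
	return mlx4_wol_write(dev, config, port);
}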
enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	int rem_mcg;
	int prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}