/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
11 #include <linux/netdevice.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/interrupt.h>
16 #include <linux/pci.h>
17 #include <linux/etherdevice.h>
18 #include <rdma/ib_verbs.h>
23 #ifdef CONFIG_BNXT_DCB
24 static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt
*bp
, struct ieee_ets
*ets
)
26 struct hwrm_queue_pri2cos_cfg_input req
= {0};
30 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_PRI2COS_CFG
, -1, -1);
31 req
.flags
= cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
|
32 QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN
);
34 pri2cos
= &req
.pri0_cos_queue_id
;
35 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
36 req
.enables
|= cpu_to_le32(
37 QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID
<< i
);
39 pri2cos
[i
] = bp
->q_info
[ets
->prio_tc
[i
]].queue_id
;
41 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
45 static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt
*bp
, struct ieee_ets
*ets
)
47 struct hwrm_queue_pri2cos_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
48 struct hwrm_queue_pri2cos_qcfg_input req
= {0};
51 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_PRI2COS_QCFG
, -1, -1);
52 req
.flags
= cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN
);
53 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
55 u8
*pri2cos
= &resp
->pri0_cos_queue_id
;
58 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
59 u8 queue_id
= pri2cos
[i
];
61 for (j
= 0; j
< bp
->max_tc
; j
++) {
62 if (bp
->q_info
[j
].queue_id
== queue_id
) {
72 static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt
*bp
, struct ieee_ets
*ets
,
75 struct hwrm_queue_cos2bw_cfg_input req
= {0};
76 struct bnxt_cos2bw_cfg cos2bw
;
80 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_COS2BW_CFG
, -1, -1);
82 for (i
= 0; i
< max_tc
; i
++, data
+= sizeof(cos2bw
) - 4) {
83 req
.enables
|= cpu_to_le32(
84 QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID
<< i
);
86 memset(&cos2bw
, 0, sizeof(cos2bw
));
87 cos2bw
.queue_id
= bp
->q_info
[i
].queue_id
;
88 if (ets
->tc_tsa
[i
] == IEEE_8021QAZ_TSA_STRICT
) {
90 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP
;
94 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS
;
95 cos2bw
.bw_weight
= ets
->tc_tx_bw
[i
];
97 memcpy(data
, &cos2bw
.queue_id
, sizeof(cos2bw
) - 4);
99 req
.queue_id0
= cos2bw
.queue_id
;
103 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
107 static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt
*bp
, struct ieee_ets
*ets
)
109 struct hwrm_queue_cos2bw_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
110 struct hwrm_queue_cos2bw_qcfg_input req
= {0};
111 struct bnxt_cos2bw_cfg cos2bw
;
115 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_COS2BW_QCFG
, -1, -1);
116 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
120 data
= &resp
->queue_id0
+ offsetof(struct bnxt_cos2bw_cfg
, queue_id
);
121 for (i
= 0; i
< bp
->max_tc
; i
++, data
+= sizeof(cos2bw
) - 4) {
124 memcpy(&cos2bw
.queue_id
, data
, sizeof(cos2bw
) - 4);
126 cos2bw
.queue_id
= resp
->queue_id0
;
128 for (j
= 0; j
< bp
->max_tc
; j
++) {
129 if (bp
->q_info
[j
].queue_id
!= cos2bw
.queue_id
)
132 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP
) {
133 ets
->tc_tsa
[j
] = IEEE_8021QAZ_TSA_STRICT
;
135 ets
->tc_tsa
[j
] = IEEE_8021QAZ_TSA_ETS
;
136 ets
->tc_tx_bw
[j
] = cos2bw
.bw_weight
;
143 static int bnxt_hwrm_queue_cfg(struct bnxt
*bp
, unsigned int lltc_mask
)
145 struct hwrm_queue_cfg_input req
= {0};
148 if (netif_running(bp
->dev
))
151 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_CFG
, -1, -1);
152 req
.flags
= cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
);
153 req
.enables
= cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE
);
155 /* Configure lossless queues to lossy first */
156 req
.service_profile
= QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY
;
157 for (i
= 0; i
< bp
->max_tc
; i
++) {
158 if (BNXT_LLQ(bp
->q_info
[i
].queue_profile
)) {
159 req
.queue_id
= cpu_to_le32(bp
->q_info
[i
].queue_id
);
160 hwrm_send_message(bp
, &req
, sizeof(req
),
162 bp
->q_info
[i
].queue_profile
=
163 QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY
;
167 /* Now configure desired queues to lossless */
168 req
.service_profile
= QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS
;
169 for (i
= 0; i
< bp
->max_tc
; i
++) {
170 if (lltc_mask
& (1 << i
)) {
171 req
.queue_id
= cpu_to_le32(bp
->q_info
[i
].queue_id
);
172 hwrm_send_message(bp
, &req
, sizeof(req
),
174 bp
->q_info
[i
].queue_profile
=
175 QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS
;
178 if (netif_running(bp
->dev
))
184 static int bnxt_hwrm_queue_pfc_cfg(struct bnxt
*bp
, struct ieee_pfc
*pfc
)
186 struct hwrm_queue_pfcenable_cfg_input req
= {0};
187 struct ieee_ets
*my_ets
= bp
->ieee_ets
;
188 unsigned int tc_mask
= 0, pri_mask
= 0;
189 u8 i
, pri
, lltc_count
= 0;
190 bool need_q_recfg
= false;
196 for (i
= 0; i
< bp
->max_tc
; i
++) {
197 for (pri
= 0; pri
< IEEE_8021QAZ_MAX_TCS
; pri
++) {
198 if ((pfc
->pfc_en
& (1 << pri
)) &&
199 (my_ets
->prio_tc
[pri
] == i
)) {
200 pri_mask
|= 1 << pri
;
204 if (tc_mask
& (1 << i
))
207 if (lltc_count
> bp
->max_lltc
)
210 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_PFCENABLE_CFG
, -1, -1);
211 req
.flags
= cpu_to_le32(pri_mask
);
212 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
216 for (i
= 0; i
< bp
->max_tc
; i
++) {
217 if (tc_mask
& (1 << i
)) {
218 if (!BNXT_LLQ(bp
->q_info
[i
].queue_profile
))
224 rc
= bnxt_hwrm_queue_cfg(bp
, tc_mask
);
229 static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt
*bp
, struct ieee_pfc
*pfc
)
231 struct hwrm_queue_pfcenable_qcfg_output
*resp
= bp
->hwrm_cmd_resp_addr
;
232 struct hwrm_queue_pfcenable_qcfg_input req
= {0};
236 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_QUEUE_PFCENABLE_QCFG
, -1, -1);
237 rc
= hwrm_send_message(bp
, &req
, sizeof(req
), HWRM_CMD_TIMEOUT
);
241 pri_mask
= le32_to_cpu(resp
->flags
);
242 pfc
->pfc_en
= pri_mask
;
246 static int bnxt_hwrm_set_dcbx_app(struct bnxt
*bp
, struct dcb_app
*app
,
249 struct hwrm_fw_set_structured_data_input set
= {0};
250 struct hwrm_fw_get_structured_data_input get
= {0};
251 struct hwrm_struct_data_dcbx_app
*fw_app
;
252 struct hwrm_struct_hdr
*data
;
257 if (bp
->hwrm_spec_code
< 0x10601)
260 n
= IEEE_8021QAZ_MAX_TCS
;
261 data_len
= sizeof(*data
) + sizeof(*fw_app
) * n
;
262 data
= dma_alloc_coherent(&bp
->pdev
->dev
, data_len
, &mapping
,
267 memset(data
, 0, data_len
);
268 bnxt_hwrm_cmd_hdr_init(bp
, &get
, HWRM_FW_GET_STRUCTURED_DATA
, -1, -1);
269 get
.dest_data_addr
= cpu_to_le64(mapping
);
270 get
.structure_id
= cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP
);
271 get
.subtype
= cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL
);
273 rc
= hwrm_send_message(bp
, &get
, sizeof(get
), HWRM_CMD_TIMEOUT
);
277 fw_app
= (struct hwrm_struct_data_dcbx_app
*)(data
+ 1);
279 if (data
->struct_id
!= cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP
)) {
285 for (i
= 0; i
< n
; i
++, fw_app
++) {
286 if (fw_app
->protocol_id
== cpu_to_be16(app
->protocol
) &&
287 fw_app
->protocol_selector
== app
->selector
&&
288 fw_app
->priority
== app
->priority
) {
298 fw_app
->protocol_id
= cpu_to_be16(app
->protocol
);
299 fw_app
->protocol_selector
= app
->selector
;
300 fw_app
->priority
= app
->priority
;
305 /* not found, nothing to delete */
309 len
= (n
- 1 - i
) * sizeof(*fw_app
);
311 memmove(fw_app
, fw_app
+ 1, len
);
313 memset(fw_app
+ n
, 0, sizeof(*fw_app
));
316 data
->len
= cpu_to_le16(sizeof(*fw_app
) * n
);
317 data
->subtype
= cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL
);
319 bnxt_hwrm_cmd_hdr_init(bp
, &set
, HWRM_FW_SET_STRUCTURED_DATA
, -1, -1);
320 set
.src_data_addr
= cpu_to_le64(mapping
);
321 set
.data_len
= cpu_to_le16(sizeof(*data
) + sizeof(*fw_app
) * n
);
323 rc
= hwrm_send_message(bp
, &set
, sizeof(set
), HWRM_CMD_TIMEOUT
);
328 dma_free_coherent(&bp
->pdev
->dev
, data_len
, data
, mapping
);
332 static int bnxt_ets_validate(struct bnxt
*bp
, struct ieee_ets
*ets
, u8
*tc
)
334 int total_ets_bw
= 0;
338 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
339 if (ets
->prio_tc
[i
] > bp
->max_tc
) {
340 netdev_err(bp
->dev
, "priority to TC mapping exceeds TC count %d\n",
344 if (ets
->prio_tc
[i
] > max_tc
)
345 max_tc
= ets
->prio_tc
[i
];
347 if ((ets
->tc_tx_bw
[i
] || ets
->tc_tsa
[i
]) && i
> bp
->max_tc
)
350 switch (ets
->tc_tsa
[i
]) {
351 case IEEE_8021QAZ_TSA_STRICT
:
353 case IEEE_8021QAZ_TSA_ETS
:
354 total_ets_bw
+= ets
->tc_tx_bw
[i
];
360 if (total_ets_bw
> 100)
367 static int bnxt_dcbnl_ieee_getets(struct net_device
*dev
, struct ieee_ets
*ets
)
369 struct bnxt
*bp
= netdev_priv(dev
);
370 struct ieee_ets
*my_ets
= bp
->ieee_ets
;
372 ets
->ets_cap
= bp
->max_tc
;
377 if (bp
->dcbx_cap
& DCB_CAP_DCBX_HOST
)
380 my_ets
= kzalloc(sizeof(*my_ets
), GFP_KERNEL
);
383 rc
= bnxt_hwrm_queue_cos2bw_qcfg(bp
, my_ets
);
386 rc
= bnxt_hwrm_queue_pri2cos_qcfg(bp
, my_ets
);
391 ets
->cbs
= my_ets
->cbs
;
392 memcpy(ets
->tc_tx_bw
, my_ets
->tc_tx_bw
, sizeof(ets
->tc_tx_bw
));
393 memcpy(ets
->tc_rx_bw
, my_ets
->tc_rx_bw
, sizeof(ets
->tc_rx_bw
));
394 memcpy(ets
->tc_tsa
, my_ets
->tc_tsa
, sizeof(ets
->tc_tsa
));
395 memcpy(ets
->prio_tc
, my_ets
->prio_tc
, sizeof(ets
->prio_tc
));
399 static int bnxt_dcbnl_ieee_setets(struct net_device
*dev
, struct ieee_ets
*ets
)
401 struct bnxt
*bp
= netdev_priv(dev
);
402 struct ieee_ets
*my_ets
= bp
->ieee_ets
;
406 if (!(bp
->dcbx_cap
& DCB_CAP_DCBX_VER_IEEE
) ||
407 !(bp
->dcbx_cap
& DCB_CAP_DCBX_HOST
))
410 rc
= bnxt_ets_validate(bp
, ets
, &max_tc
);
413 my_ets
= kzalloc(sizeof(*my_ets
), GFP_KERNEL
);
416 /* initialize PRI2TC mappings to invalid value */
417 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
418 my_ets
->prio_tc
[i
] = IEEE_8021QAZ_MAX_TCS
;
419 bp
->ieee_ets
= my_ets
;
421 rc
= bnxt_setup_mq_tc(dev
, max_tc
);
424 rc
= bnxt_hwrm_queue_cos2bw_cfg(bp
, ets
, max_tc
);
427 rc
= bnxt_hwrm_queue_pri2cos_cfg(bp
, ets
);
430 memcpy(my_ets
, ets
, sizeof(*my_ets
));
435 static int bnxt_dcbnl_ieee_getpfc(struct net_device
*dev
, struct ieee_pfc
*pfc
)
437 struct bnxt
*bp
= netdev_priv(dev
);
438 __le64
*stats
= (__le64
*)bp
->hw_rx_port_stats
;
439 struct ieee_pfc
*my_pfc
= bp
->ieee_pfc
;
443 pfc
->pfc_cap
= bp
->max_lltc
;
446 if (bp
->dcbx_cap
& DCB_CAP_DCBX_HOST
)
449 my_pfc
= kzalloc(sizeof(*my_pfc
), GFP_KERNEL
);
452 bp
->ieee_pfc
= my_pfc
;
453 rc
= bnxt_hwrm_queue_pfc_qcfg(bp
, my_pfc
);
458 pfc
->pfc_en
= my_pfc
->pfc_en
;
459 pfc
->mbc
= my_pfc
->mbc
;
460 pfc
->delay
= my_pfc
->delay
;
465 rx_off
= BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0
);
466 tx_off
= BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0
);
467 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++, rx_off
++, tx_off
++) {
468 pfc
->requests
[i
] = le64_to_cpu(*(stats
+ tx_off
));
469 pfc
->indications
[i
] = le64_to_cpu(*(stats
+ rx_off
));
475 static int bnxt_dcbnl_ieee_setpfc(struct net_device
*dev
, struct ieee_pfc
*pfc
)
477 struct bnxt
*bp
= netdev_priv(dev
);
478 struct ieee_pfc
*my_pfc
= bp
->ieee_pfc
;
481 if (!(bp
->dcbx_cap
& DCB_CAP_DCBX_VER_IEEE
) ||
482 !(bp
->dcbx_cap
& DCB_CAP_DCBX_HOST
))
486 my_pfc
= kzalloc(sizeof(*my_pfc
), GFP_KERNEL
);
489 bp
->ieee_pfc
= my_pfc
;
491 rc
= bnxt_hwrm_queue_pfc_cfg(bp
, pfc
);
493 memcpy(my_pfc
, pfc
, sizeof(*my_pfc
));
498 static int bnxt_dcbnl_ieee_setapp(struct net_device
*dev
, struct dcb_app
*app
)
500 struct bnxt
*bp
= netdev_priv(dev
);
503 if (!(bp
->dcbx_cap
& DCB_CAP_DCBX_VER_IEEE
) ||
504 !(bp
->dcbx_cap
& DCB_CAP_DCBX_HOST
))
507 rc
= dcb_ieee_setapp(dev
, app
);
511 if ((app
->selector
== IEEE_8021QAZ_APP_SEL_ETHERTYPE
&&
512 app
->protocol
== ETH_P_IBOE
) ||
513 (app
->selector
== IEEE_8021QAZ_APP_SEL_DGRAM
&&
514 app
->protocol
== ROCE_V2_UDP_DPORT
))
515 rc
= bnxt_hwrm_set_dcbx_app(bp
, app
, true);
520 static int bnxt_dcbnl_ieee_delapp(struct net_device
*dev
, struct dcb_app
*app
)
522 struct bnxt
*bp
= netdev_priv(dev
);
525 if (!(bp
->dcbx_cap
& DCB_CAP_DCBX_VER_IEEE
) ||
526 !(bp
->dcbx_cap
& DCB_CAP_DCBX_HOST
))
529 rc
= dcb_ieee_delapp(dev
, app
);
532 if ((app
->selector
== IEEE_8021QAZ_APP_SEL_ETHERTYPE
&&
533 app
->protocol
== ETH_P_IBOE
) ||
534 (app
->selector
== IEEE_8021QAZ_APP_SEL_DGRAM
&&
535 app
->protocol
== ROCE_V2_UDP_DPORT
))
536 rc
= bnxt_hwrm_set_dcbx_app(bp
, app
, false);
541 static u8
bnxt_dcbnl_getdcbx(struct net_device
*dev
)
543 struct bnxt
*bp
= netdev_priv(dev
);
548 static u8
bnxt_dcbnl_setdcbx(struct net_device
*dev
, u8 mode
)
550 struct bnxt
*bp
= netdev_priv(dev
);
552 /* only support IEEE */
553 if ((mode
& DCB_CAP_DCBX_VER_CEE
) || !(mode
& DCB_CAP_DCBX_VER_IEEE
))
556 if ((mode
& DCB_CAP_DCBX_HOST
) && BNXT_VF(bp
))
559 if (mode
== bp
->dcbx_cap
)
566 static const struct dcbnl_rtnl_ops dcbnl_ops
= {
567 .ieee_getets
= bnxt_dcbnl_ieee_getets
,
568 .ieee_setets
= bnxt_dcbnl_ieee_setets
,
569 .ieee_getpfc
= bnxt_dcbnl_ieee_getpfc
,
570 .ieee_setpfc
= bnxt_dcbnl_ieee_setpfc
,
571 .ieee_setapp
= bnxt_dcbnl_ieee_setapp
,
572 .ieee_delapp
= bnxt_dcbnl_ieee_delapp
,
573 .getdcbx
= bnxt_dcbnl_getdcbx
,
574 .setdcbx
= bnxt_dcbnl_setdcbx
,
577 void bnxt_dcb_init(struct bnxt
*bp
)
579 if (bp
->hwrm_spec_code
< 0x10501)
582 bp
->dcbx_cap
= DCB_CAP_DCBX_VER_IEEE
;
583 if (BNXT_PF(bp
) && !(bp
->flags
& BNXT_FLAG_FW_LLDP_AGENT
))
584 bp
->dcbx_cap
|= DCB_CAP_DCBX_HOST
;
586 bp
->dcbx_cap
|= DCB_CAP_DCBX_LLD_MANAGED
;
587 bp
->dev
->dcbnl_ops
= &dcbnl_ops
;
590 void bnxt_dcb_free(struct bnxt
*bp
)
600 void bnxt_dcb_init(struct bnxt
*bp
)
604 void bnxt_dcb_free(struct bnxt
*bp
)