/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
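/* Program the IEEE priority to hardware CoS queue mapping via
 * HWRM_QUEUE_PRI2COS_CFG.  The flags request that the mapping apply in
 * both directions and, per the IVLAN flag name, to the inner VLAN PRI.
 */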
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
        struct hwrm_queue_pri2cos_cfg_input req = {0};
        int rc = 0, i;
        u8 *pri2cos;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
                                QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

        pri2cos = &req.pri0_cos_queue_id;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                req.enables |= cpu_to_le32(
                        QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

                pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
        }
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        return rc;
}

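/* Query the current priority to CoS queue mapping from firmware and
 * convert the returned hardware queue IDs back to TC indices in
 * ets->prio_tc[].
 */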
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
        struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_pri2cos_qcfg_input req = {0};
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                u8 *pri2cos = &resp->pri0_cos_queue_id;
                int i, j;

                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                        u8 queue_id = pri2cos[i];

                        for (j = 0; j < bp->max_tc; j++) {
                                if (bp->q_info[j].queue_id == queue_id) {
                                        ets->prio_tc[i] = j;
                                        break;
                                }
                        }
                }
        }
        return rc;
}

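/* Program the transmission selection algorithm (strict priority or ETS)
 * and the ETS bandwidth weight for each CoS queue via
 * HWRM_QUEUE_COS2BW_CFG.  The per-queue entries are packed into the
 * request back to back with a sizeof(cos2bw) - 4 stride.
 */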
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
                                      u8 max_tc)
{
        struct hwrm_queue_cos2bw_cfg_input req = {0};
        struct bnxt_cos2bw_cfg cos2bw;
        int rc = 0, i;
        void *data;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
        data = &req.unused_0;
        for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
                req.enables |= cpu_to_le32(
                        QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);

                memset(&cos2bw, 0, sizeof(cos2bw));
                cos2bw.queue_id = bp->q_info[i].queue_id;
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
                        cos2bw.tsa =
                                QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
                        cos2bw.pri_lvl = i;
                } else {
                        cos2bw.tsa =
                                QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
                        cos2bw.bw_weight = ets->tc_tx_bw[i];
                }
                memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
                if (i == 0) {
                        req.queue_id0 = cos2bw.queue_id;
                        req.unused_0 = 0;
                }
        }
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        return rc;
}

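/* Query the per-queue TSA/bandwidth configuration from firmware and
 * translate it into ets->tc_tsa[] and ets->tc_tx_bw[].
 */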
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
        struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_cos2bw_qcfg_input req = {0};
        struct bnxt_cos2bw_cfg cos2bw;
        void *data;
        int rc, i;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return rc;

        data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
        for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
                int j;

                memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
                if (i == 0)
                        cos2bw.queue_id = resp->queue_id0;

                for (j = 0; j < bp->max_tc; j++) {
                        if (bp->q_info[j].queue_id != cos2bw.queue_id)
                                continue;
                        if (cos2bw.tsa ==
                            QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
                                ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
                        } else {
                                ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
                                ets->tc_tx_bw[j] = cos2bw.bw_weight;
                        }
                }
        }
        return 0;
}

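/* Set the service profile of each CoS queue: first demote all currently
 * lossless queues to lossy, then make the queues in lltc_mask lossless.
 * TX is paused around the update when the interface is running.
 */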
static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
{
        struct hwrm_queue_cfg_input req = {0};
        int i;

        if (netif_running(bp->dev))
                bnxt_tx_disable(bp);

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
        req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);

        /* Configure lossless queues to lossy first */
        req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
        for (i = 0; i < bp->max_tc; i++) {
                if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
                        req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
                        hwrm_send_message(bp, &req, sizeof(req),
                                          HWRM_CMD_TIMEOUT);
                        bp->q_info[i].queue_profile =
                                QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
                }
        }

        /* Now configure desired queues to lossless */
        req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
        for (i = 0; i < bp->max_tc; i++) {
                if (lltc_mask & (1 << i)) {
                        req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
                        hwrm_send_message(bp, &req, sizeof(req),
                                          HWRM_CMD_TIMEOUT);
                        bp->q_info[i].queue_profile =
                                QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
                }
        }
        if (netif_running(bp->dev))
                bnxt_tx_enable(bp);

        return 0;
}

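/* Enable PFC on the priorities set in pfc->pfc_en, after checking that
 * the number of lossless TCs does not exceed what the device supports,
 * and reconfigure any affected queues that are not yet lossless.
 */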
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
        struct hwrm_queue_pfcenable_cfg_input req = {0};
        struct ieee_ets *my_ets = bp->ieee_ets;
        unsigned int tc_mask = 0, pri_mask = 0;
        u8 i, pri, lltc_count = 0;
        bool need_q_recfg = false;
        int rc;

        if (!my_ets)
                return -EINVAL;

        for (i = 0; i < bp->max_tc; i++) {
                for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
                        if ((pfc->pfc_en & (1 << pri)) &&
                            (my_ets->prio_tc[pri] == i)) {
                                pri_mask |= 1 << pri;
                                tc_mask |= 1 << i;
                        }
                }
                if (tc_mask & (1 << i))
                        lltc_count++;
        }
        if (lltc_count > bp->max_lltc)
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
        req.flags = cpu_to_le32(pri_mask);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return rc;

        for (i = 0; i < bp->max_tc; i++) {
                if (tc_mask & (1 << i)) {
                        if (!BNXT_LLQ(bp->q_info[i].queue_profile))
                                need_q_recfg = true;
                }
        }

        if (need_q_recfg)
                rc = bnxt_hwrm_queue_cfg(bp, tc_mask);

        return rc;
}

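/* Read back the bitmap of PFC-enabled priorities from firmware. */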
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
        struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_pfcenable_qcfg_input req = {0};
        u8 pri_mask;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                return rc;

        pri_mask = le32_to_cpu(resp->flags);
        pfc->pfc_en = pri_mask;
        return 0;
}

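/* Validate an ETS config from dcbnl: the priority to TC mapping must
 * fit within the device's TCs, only strict priority and ETS TSAs are
 * accepted, and the total ETS bandwidth must not exceed 100%.  On
 * success, *tc returns the number of TCs in use.
 */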
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
        int total_ets_bw = 0;
        u8 max_tc = 0;
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->prio_tc[i] > bp->max_tc) {
                        netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
                                   ets->prio_tc[i]);
                        return -EINVAL;
                }
                if (ets->prio_tc[i] > max_tc)
                        max_tc = ets->prio_tc[i];

                if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
                        return -EINVAL;

                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        total_ets_bw += ets->tc_tx_bw[i];
                        break;
                default:
                        return -ENOTSUPP;
                }
        }
        if (total_ets_bw > 100)
                return -EINVAL;

        *tc = max_tc + 1;
        return 0;
}

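/* dcbnl ieee_getets hook.  If no config has been cached and DCBX is
 * firmware-managed, query the current settings from firmware.
 */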
static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_ets *my_ets = bp->ieee_ets;

        ets->ets_cap = bp->max_tc;

        if (!my_ets) {
                int rc;

                if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
                        return 0;

                my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
                if (!my_ets)
                        return 0;
                rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
                if (rc)
                        goto error;
                rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
                if (rc)
                        goto error;
                /* cache the queried config; freed in bnxt_dcb_free() */
                bp->ieee_ets = my_ets;
        }

        ets->cbs = my_ets->cbs;
        memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
        memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
        memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
        memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
        return 0;

error:
        kfree(my_ets);
        return 0;
}

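/* dcbnl ieee_setets hook.  Validate the requested config, set up the
 * matching number of TCs, and program the CoS-to-bandwidth and
 * priority-to-CoS mappings into firmware.
 */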
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_ets *my_ets = bp->ieee_ets;
        u8 max_tc = 0;
        int rc, i;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        rc = bnxt_ets_validate(bp, ets, &max_tc);
        if (!rc) {
                if (!my_ets) {
                        my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
                        if (!my_ets)
                                return -ENOMEM;
                        /* initialize PRI2TC mappings to invalid value */
                        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                                my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
                        bp->ieee_ets = my_ets;
                }
                rc = bnxt_setup_mq_tc(dev, max_tc);
                if (rc)
                        return rc;
                rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
                if (rc)
                        return rc;
                rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
                if (rc)
                        return rc;
                memcpy(my_ets, ets, sizeof(*my_ets));
        }
        return rc;
}

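/* dcbnl ieee_getpfc hook.  Also report per-priority counts of PFC
 * frames transmitted (requests) and received (indications) from the
 * port statistics, when available.
 */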
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
        struct bnxt *bp = netdev_priv(dev);
        __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
        struct ieee_pfc *my_pfc = bp->ieee_pfc;
        long rx_off, tx_off;
        int i, rc;

        pfc->pfc_cap = bp->max_lltc;

        if (!my_pfc) {
                if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
                        return 0;

                my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
                if (!my_pfc)
                        return 0;
                bp->ieee_pfc = my_pfc;
                rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
                if (rc)
                        return 0;
        }

        pfc->pfc_en = my_pfc->pfc_en;
        pfc->mbc = my_pfc->mbc;
        pfc->delay = my_pfc->delay;

        if (!stats)
                return 0;

        rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
        tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
                pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
                pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
        }

        return 0;
}

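/* dcbnl ieee_setpfc hook.  Program the new PFC settings and cache them
 * on success.
 */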
static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_pfc *my_pfc = bp->ieee_pfc;
        int rc;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        if (!my_pfc) {
                my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
                if (!my_pfc)
                        return -ENOMEM;
                bp->ieee_pfc = my_pfc;
        }
        rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
        if (!rc)
                memcpy(my_pfc, pfc, sizeof(*my_pfc));

        return rc;
}

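/* dcbnl ieee_setapp hook.  This version only records the app entry in
 * the dcbnl core; nothing is sent to firmware.
 */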
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc = -EINVAL;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        rc = dcb_ieee_setapp(dev, app);
        return rc;
}

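/* dcbnl ieee_delapp hook; removes the app entry from the dcbnl core. */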
static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
                return -EINVAL;

        rc = dcb_ieee_delapp(dev, app);
        return rc;
}

static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);

        return bp->dcbx_cap;
}

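/* dcbnl setdcbx hook.  Only IEEE DCBX is supported; CEE is rejected,
 * and a VF cannot run a host agent.  A nonzero return rejects the
 * requested mode.
 */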
static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
        struct bnxt *bp = netdev_priv(dev);

        /* only support IEEE */
        if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
                return 1;

        if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp))
                return 1;

        if (mode == bp->dcbx_cap)
                return 0;

        bp->dcbx_cap = mode;
        return 0;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
        .ieee_getets = bnxt_dcbnl_ieee_getets,
        .ieee_setets = bnxt_dcbnl_ieee_setets,
        .ieee_getpfc = bnxt_dcbnl_ieee_getpfc,
        .ieee_setpfc = bnxt_dcbnl_ieee_setpfc,
        .ieee_setapp = bnxt_dcbnl_ieee_setapp,
        .ieee_delapp = bnxt_dcbnl_ieee_delapp,
        .getdcbx = bnxt_dcbnl_getdcbx,
        .setdcbx = bnxt_dcbnl_setdcbx,
};

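/* Register the dcbnl ops if the firmware HWRM spec is 1.5.1 or newer.
 * A PF defaults to host-managed IEEE DCBX; a VF to LLD-managed.
 */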
void bnxt_dcb_init(struct bnxt *bp)
{
        if (bp->hwrm_spec_code < 0x10501)
                return;

        bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
        if (BNXT_PF(bp))
                bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
        else
                bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
        bp->dev->dcbnl_ops = &dcbnl_ops;
}

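/* Free the cached IEEE ETS and PFC state. */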
void bnxt_dcb_free(struct bnxt *bp)
{
        kfree(bp->ieee_pfc);
        kfree(bp->ieee_ets);
        bp->ieee_pfc = NULL;
        bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}

#endif