/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
				 struct ixgbe_hw_stats *stats,
				 u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_tc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes (read low first to prevent missed carry) */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
		stats->qbtc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes (read low first to prevent missed carry) */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
		stats->qbrc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

		/* Received Dropped Packet */
		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
	}

	return IXGBE_SUCCESS;
}

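/*
 * Usage sketch (illustrative only, not part of the base driver): a caller
 * accumulating per-TC counters might look like the following, assuming an
 * already-initialized struct ixgbe_hw and a zeroed stats structure;
 * report_tc_counters() stands in for a hypothetical consumer:
 *
 *	struct ixgbe_hw_stats stats = { 0 };
 *
 *	if (ixgbe_dcb_get_tc_stats_82599(&hw, &stats,
 *					 IXGBE_DCB_MAX_TRAFFIC_CLASS) ==
 *	    IXGBE_SUCCESS)
 *		report_tc_counters(&stats);
 */
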
/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes in use
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
				  struct ixgbe_hw_stats *stats,
				  u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_pfc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to TC assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to that
	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

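	/*
	 * Worked example (hypothetical mapping, not a driver default): with
	 * map = {0, 0, 1, 1, 2, 2, 3, 3} and the 3-bit per-UP field width
	 * implied by IXGBE_RTRUP2TC_UP_SHIFT, the loop above packs
	 *
	 *	reg = 0<<0 | 0<<3 | 1<<6 | 1<<9 | 2<<12 | 2<<15 | 3<<18 | 3<<21
	 *	    = 0x006D2240
	 *
	 * so each 3-bit field of RTRUP2TC holds the TC for one user priority.
	 */
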
	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8 i;

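	/*
	 * RTTDQSEL selects the Tx queue that a subsequent RTTDT1C write
	 * addresses; the loop below walks all 128 queues to clear every
	 * per-queue credit value.
	 */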
	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to TC assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to that
	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: bitmask of user priorities with PFC enabled
 * @map: priority to TC assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control. So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB. This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return IXGBE_SUCCESS;
}

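/*
 * Usage sketch (hypothetical values, for illustration only): enable PFC on
 * priorities 0-3 with each of those priorities on its own TC, assuming the
 * caller has already populated hw->fc.high_water[], hw->fc.low_water[] and
 * hw->fc.pause_time:
 *
 *	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 1, 2, 3, 3, 3, 3, 3 };
 *
 *	ixgbe_dcb_config_pfc_82599(&hw, 0x0F, map);
 */
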
/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8 i = 0;
	u8 tc_count = 8;
	bool vt_mode = false;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == false) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == false) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			if (i % 8 > 3)
				/* In 4 TC mode, odd 16-queue ranges are
				 * not used.
				 */
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == true) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}

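/*
 * Register packing note (derived from the loops above): each RQSMR/TQSM
 * register holds four 8-bit fields, one per queue, each selecting the
 * statistics counter set for that queue. For example, 0x01010101 sends all
 * four queues of a register to counter set 1, while the 0x03020100 value
 * used in VT mode sends queues 4n..4n+3 to counter sets 0..3 respectively.
 */
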
/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data,
			 * overwrite no RSS
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to TC assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{
	UNREFERENCED_1PARAMETER(link_speed);

	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);

	return IXGBE_SUCCESS;
}

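/*
 * Bring-up sketch (one plausible ordering, shown only for illustration; the
 * generic DCB dispatch code is the authoritative caller of these helpers):
 * program the general DCB mode first, then the three arbiters, then PFC.
 * The credit arrays and pfc_en/map values are assumed to have been derived
 * from the CEE configuration beforehand:
 *
 *	ixgbe_dcb_config_82599(&hw, &dcb_config);
 *	ixgbe_dcb_hw_config_82599(&hw, link_speed, refill, max, bwg_id,
 *				  tsa, map);
 *	ixgbe_dcb_config_pfc_82599(&hw, pfc_en, map);
 */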