/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>

#include "base/ixgbe_api.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

int
rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
                              struct ether_addr *mac_addr)
{
        struct ixgbe_hw *hw;
        struct ixgbe_vf_info *vfinfo;
        int rar_entry;
        uint8_t *new_mac = (uint8_t *)(mac_addr);
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        rar_entry = hw->mac.num_rar_entries - (vf + 1);

        if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
                rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
                           ETHER_ADDR_LEN);
                return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
                                           IXGBE_RAH_AV);
        }
        return -EINVAL;
}

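/*
 * Illustrative usage sketch (not part of the driver): assigning a locally
 * administered MAC address to VF 0 on port 0 from an application that has
 * already probed an ixgbe PF with SR-IOV VFs enabled. Port number, VF index
 * and address bytes below are arbitrary example values.
 *
 *     struct ether_addr vf_mac = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *     };
 *
 *     if (rte_pmd_ixgbe_set_vf_mac_addr(0, 0, &vf_mac) != 0)
 *             printf("failed to set VF 0 MAC address\n");
 */
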
int
rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
{
        struct ixgbe_hw *hw;
        struct ixgbe_vf_info *vfinfo;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

        ctrl = IXGBE_PF_CONTROL_MSG;
        if (vfinfo[vf].clear_to_send)
                ctrl |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(hw, &ctrl, 1, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        struct ixgbe_mac_info *mac;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mac = &hw->mac;

        mac->ops.set_vlan_anti_spoofing(hw, on, vf);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        struct ixgbe_mac_info *mac;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        mac = &hw->mac;
        mac->ops.set_mac_anti_spoofing(hw, on, vf);

        return 0;
}

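/*
 * Illustrative usage sketch (not part of the driver): turning on both MAC
 * and VLAN anti-spoofing for VF 1 on port 0, so the hardware drops frames
 * whose source MAC or VLAN tag does not match what the PF assigned to that
 * VF. Port and VF numbers are arbitrary example values.
 *
 *     rte_pmd_ixgbe_set_vf_mac_anti_spoof(0, 1, 1);
 *     rte_pmd_ixgbe_set_vf_vlan_anti_spoof(0, 1, 1);
 */
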
int
rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
{
        struct ixgbe_hw *hw;
        uint32_t ctrl;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (vlan_id > ETHER_MAX_VLAN_ID)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
        if (vlan_id) {
                ctrl = vlan_id;
                ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
        } else {
                ctrl = 0;
        }

        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);

        return 0;
}

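/*
 * Illustrative usage sketch (not part of the driver): making the hardware
 * insert VLAN tag 100 on every frame transmitted by VF 2, and later
 * disabling insertion again by passing VLAN ID 0. Values are arbitrary
 * examples.
 *
 *     rte_pmd_ixgbe_set_vf_vlan_insert(0, 2, 100);   // tag VF 2 Tx with VLAN 100
 *     rte_pmd_ixgbe_set_vf_vlan_insert(0, 2, 0);     // stop inserting a tag
 */
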
int
rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t ctrl;
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
        /* enable or disable VMDQ loopback */
        if (on)
                ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
        else
                ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;

        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t reg_value;
        int i;
        int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        for (i = 0; i <= num_queues; i++) {
                reg_value = IXGBE_QDE_WRITE |
                            (i << IXGBE_QDE_IDX_SHIFT) |
                            (on & IXGBE_QDE_ENABLE);
                IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
        }

        return 0;
}

int
rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
{
        struct ixgbe_hw *hw;
        uint32_t reg_value;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        /* only support VFs 0 to 63 */
        if ((vf >= pci_dev->max_vfs) || (vf > 63))
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
        if (on)
                reg_value |= IXGBE_SRRCTL_DROP_EN;
        else
                reg_value &= ~IXGBE_SRRCTL_DROP_EN;

        IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;
        uint16_t queues_per_pool;
        uint32_t q;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

        /* The PF has 128 queue pairs and in SRIOV configuration
         * those queues will be assigned to VFs, so the RXDCTL
         * registers will be dealing with queues which will be
         * assigned to VFs.
         * Let's say we have SRIOV configured with 31 VFs; then the
         * first 124 queues 0-123 will be allocated to the VFs and
         * only the last 4 queues 124-127 will be assigned to the PF.
         */
        if (hw->mac.type == ixgbe_mac_82598EB)
                queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
                                  ETH_16_POOLS;
        else
                queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
                                  ETH_64_POOLS;

        for (q = 0; q < queues_per_pool; q++)
                (*dev->dev_ops->vlan_strip_queue_set)(dev,
                                q + vf * queues_per_pool, on);
        return 0;
}

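/*
 * Worked example of the queues_per_pool arithmetic above (illustrative
 * only): on an 82599-class PF, hw->mac.max_rx_queues is 128, so with the
 * ETH_64_POOLS layout each VF pool owns 128 / 64 = 2 queues and VF 3 covers
 * queues 6 and 7. A call such as
 *
 *     rte_pmd_ixgbe_set_vf_vlan_stripq(0, 3, 1);
 *
 * therefore enables VLAN stripping on exactly those queues of VF 3.
 */
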
int
rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
                            uint16_t rx_mask, uint8_t on)
{
        int val = 0;
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw;
        uint32_t vmolr;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
                             " on 82599 hardware and newer");
                return -ENOTSUP;
        }
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);

        if (on)
                vmolr |= val;
        else
                vmolr &= ~val;

        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        return 0;
}

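/*
 * Illustrative usage sketch (not part of the driver): allowing VF 0 to
 * receive broadcast and multicast traffic by setting the corresponding bits
 * in its VMOLR register. The ETH_VMDQ_ACCEPT_* flags come from rte_ethdev.h;
 * port and VF numbers are arbitrary example values.
 *
 *     rte_pmd_ixgbe_set_vf_rxmode(0, 0,
 *             ETH_VMDQ_ACCEPT_BROADCAST | ETH_VMDQ_ACCEPT_MULTICAST, 1);
 */
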
int
rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
        if (vf >= 32) {
                addr = IXGBE_VFRE(1);
                val = bit1 << (vf - 32);
        } else {
                addr = IXGBE_VFRE(0);
                val = bit1 << vf;
        }

        reg = IXGBE_READ_REG(hw, addr);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}

int
rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct rte_pci_device *pci_dev;
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];
        pci_dev = IXGBE_DEV_TO_PCI(dev);

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (vf >= pci_dev->max_vfs)
                return -EINVAL;

        if (on > 1)
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
        if (vf >= 32) {
                addr = IXGBE_VFTE(1);
                val = bit1 << (vf - 32);
        } else {
                addr = IXGBE_VFTE(0);
                val = bit1 << vf;
        }

        reg = IXGBE_READ_REG(hw, addr);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}

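/*
 * Illustrative usage sketch (not part of the driver): enabling both receive
 * and transmit for VF 5 after it has been configured, then disabling them
 * again while the VF is being reset. Port and VF numbers are arbitrary
 * example values.
 *
 *     rte_pmd_ixgbe_set_vf_rx(0, 5, 1);
 *     rte_pmd_ixgbe_set_vf_tx(0, 5, 1);
 *
 *     rte_pmd_ixgbe_set_vf_rx(0, 5, 0);
 *     rte_pmd_ixgbe_set_vf_tx(0, 5, 0);
 */
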
int
rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
                                 uint64_t vf_mask, uint8_t vlan_on)
{
        struct rte_eth_dev *dev;
        int ret = 0;
        uint16_t vf_idx;
        struct ixgbe_hw *hw;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
                return -EINVAL;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;

        for (vf_idx = 0; vf_idx < 64; vf_idx++) {
                if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
                        ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
                                                   vlan_on, false);
                        if (ret < 0)
                                return ret;
                }
        }

        return ret;
}

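/*
 * Illustrative usage sketch (not part of the driver): adding VLAN 42 to the
 * filter table for VFs 0 and 3 only. Bit n of vf_mask selects VF n, so the
 * mask below is (1ULL << 0) | (1ULL << 3). Values are arbitrary examples.
 *
 *     uint64_t vf_mask = (1ULL << 0) | (1ULL << 3);
 *
 *     rte_pmd_ixgbe_set_vf_vlan_filter(0, 42, vf_mask, 1);
 */
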
int
rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
                                uint16_t tx_rate, uint64_t q_msk)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
}

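/*
 * Illustrative usage sketch (not part of the driver): capping the transmit
 * rate of VF 1 on port 0. q_msk selects which of the VF's queues the limit
 * applies to (bit n = queue n of that VF); here both queues of a two-queue
 * pool are covered. The rate value and mask are arbitrary examples.
 *
 *     rte_pmd_ixgbe_set_vf_rate_limit(0, 1, 1000, 0x3);
 */
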
int
rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Stop the data paths */
        if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
                return -ENOTSUP;
        /**
         * Workaround:
         * There is no Tx equivalent of ixgbe_disable_sec_rx_path in the
         * base code, and the base code must not be modified in DPDK, so
         * just call the hand-written Tx variant directly for now.
         * The hardware support has already been checked by
         * ixgbe_disable_sec_rx_path().
         */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* Enable Ethernet CRC (required by MACsec offload) */
        ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);

        /* Enable the TX and RX crypto engines */
        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
        ctrl |= 0x3;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);

        /* Enable SA lookup */
        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
        ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
        ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
                     IXGBE_LSECTXCTRL_AUTH;
        ctrl |= IXGBE_LSECTXCTRL_AISCI;
        ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
        ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
        ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
        ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
        if (rp)
                ctrl |= IXGBE_LSECRXCTRL_RP;
        else
                ctrl &= ~IXGBE_LSECRXCTRL_RP;
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

        /* Start the data paths */
        ixgbe_enable_sec_rx_path(hw);
        /**
         * Workaround:
         * There is no Tx equivalent of ixgbe_enable_sec_rx_path in the
         * base code, and the base code must not be modified in DPDK, so
         * just call the hand-written Tx variant directly for now.
         */
        ixgbe_enable_sec_tx_path_generic(hw);

        return 0;
}

int
rte_pmd_ixgbe_macsec_disable(uint8_t port)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Stop the data paths */
        if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
                return -ENOTSUP;
        /**
         * Workaround:
         * There is no Tx equivalent of ixgbe_disable_sec_rx_path in the
         * base code, and the base code must not be modified in DPDK, so
         * just call the hand-written Tx variant directly for now.
         * The hardware support has already been checked by
         * ixgbe_disable_sec_rx_path().
         */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* Disable the TX and RX crypto engines */
        ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
        ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
        ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

        /* Disable SA lookup */
        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
        ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECTXCTRL_DISABLE;
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

        ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
        ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
        ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

        /* Start the data paths */
        ixgbe_enable_sec_rx_path(hw);
        /**
         * Workaround:
         * There is no Tx equivalent of ixgbe_enable_sec_rx_path in the
         * base code, and the base code must not be modified in DPDK, so
         * just call the hand-written Tx variant directly for now.
         */
        ixgbe_enable_sec_tx_path_generic(hw);

        return 0;
}

int
rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);

        ctrl = mac[4] | (mac[5] << 8);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);

        pi = rte_cpu_to_be_16(pi);
        ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
                                 uint32_t pn, uint8_t *key)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl, i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (idx != 0 && idx != 1)
                return -EINVAL;

        if (an >= 4)
                return -EINVAL;

        /* Set the PN and key */
        pn = rte_cpu_to_be_32(pn);
        if (idx == 0) {
                IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);

                for (i = 0; i < 4; i++) {
                        ctrl = (key[i * 4 + 0] << 0) |
                               (key[i * 4 + 1] << 8) |
                               (key[i * 4 + 2] << 16) |
                               (key[i * 4 + 3] << 24);
                        IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
                }
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);

                for (i = 0; i < 4; i++) {
                        ctrl = (key[i * 4 + 0] << 0) |
                               (key[i * 4 + 1] << 8) |
                               (key[i * 4 + 2] << 16) |
                               (key[i * 4 + 3] << 24);
                        IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
                }
        }

        /* Set AN and select the SA */
        ctrl = (an << idx * 2) | (idx << 4);
        IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);

        return 0;
}

int
rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
                                 uint32_t pn, uint8_t *key)
{
        struct ixgbe_hw *hw;
        struct rte_eth_dev *dev;
        uint32_t ctrl, i;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (idx != 0 && idx != 1)
                return -EINVAL;

        if (an >= 4)
                return -EINVAL;

        /* Set the PN */
        pn = rte_cpu_to_be_32(pn);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);

        /* Set the key */
        for (i = 0; i < 4; i++) {
                ctrl = (key[i * 4 + 0] << 0) |
                       (key[i * 4 + 1] << 8) |
                       (key[i * 4 + 2] << 16) |
                       (key[i * 4 + 3] << 24);
                IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
        }

        /* Set the AN and validate the SA */
        ctrl = an | (1 << 2);
        IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);

        return 0;
}

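/*
 * Illustrative usage sketch (not part of the driver): a minimal MACsec
 * bring-up sequence on port 0 using the calls above, assuming the local and
 * peer secure-channel MAC addresses, the 16-byte AES-128 key and the
 * starting packet number are already known to the application. All values
 * below are placeholders.
 *
 *     uint8_t local_mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x10 };
 *     uint8_t peer_mac[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x20 };
 *     uint8_t key[16]      = { 0 };   // replace with the real 128-bit key
 *
 *     rte_pmd_ixgbe_macsec_enable(0, 1, 1);          // encrypt + replay protect
 *     rte_pmd_ixgbe_macsec_config_txsc(0, local_mac);
 *     rte_pmd_ixgbe_macsec_config_rxsc(0, peer_mac, 1);
 *     rte_pmd_ixgbe_macsec_select_txsa(0, 0, 0, 1, key);
 *     rte_pmd_ixgbe_macsec_select_rxsa(0, 0, 0, 1, key);
 */
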
int
rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
                              uint8_t tc_num,
                              uint8_t *bw_weight)
{
        struct rte_eth_dev *dev;
        struct ixgbe_dcb_config *dcb_config;
        struct ixgbe_dcb_tc_config *tc;
        struct rte_eth_conf *eth_conf;
        struct ixgbe_bw_conf *bw_conf;
        uint8_t i;
        uint8_t nb_tcs;
        uint16_t sum;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_ixgbe_supported(dev))
                return -ENOTSUP;

        if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
                PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
                            IXGBE_DCB_MAX_TRAFFIC_CLASS);
                return -EINVAL;
        }

        dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
        eth_conf = &dev->data->dev_conf;

        if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
                nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
        } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
                if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
                    ETH_32_POOLS)
                        nb_tcs = ETH_4_TCS;
                else
                        nb_tcs = ETH_8_TCS;
        } else {
                nb_tcs = 1;
        }

        if (nb_tcs != tc_num) {
                PMD_DRV_LOG(ERR,
                            "Weight should be set for all %d enabled TCs.",
                            nb_tcs);
                return -EINVAL;
        }

        sum = 0;
        for (i = 0; i < nb_tcs; i++)
                sum += bw_weight[i];
        if (sum != 100) {
                PMD_DRV_LOG(ERR,
                            "The sum of the TC weights should be 100.");
                return -EINVAL;
        }

        for (i = 0; i < nb_tcs; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
        }
        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
        }

        bw_conf->tc_num = nb_tcs;

        return 0;
}
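
/*
 * Illustrative usage sketch (not part of the driver): distributing transmit
 * bandwidth across 4 enabled traffic classes on a DCB-configured port. The
 * weights must cover exactly the enabled TCs and sum to 100. Values are
 * arbitrary examples.
 *
 *     uint8_t bw_weight[4] = { 10, 20, 30, 40 };
 *
 *     rte_pmd_ixgbe_set_tc_bw_alloc(0, 4, bw_weight);
 */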