/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"

#define DEFAULT_POLLING_INTERVAL_10_MS (10)

const char pmd_bond_driver_name[] = "rte_bond_pmd";

int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
	/* Check valid pointer */
	if (eth_dev->data->drv_name == NULL)
		return -1;

	/* Return 0 if the driver name matches. Pointer comparison is
	 * sufficient here because bonded devices always set drv_name to the
	 * pmd_bond_driver_name constant (see rte_eth_bond_create()). */
	return eth_dev->data->drv_name != pmd_bond_driver_name;
}

int
valid_bonded_port_id(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
}

int
valid_slave_port_id(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);

	/* Verify that port_id refers to a non-bonded port */
	if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0)
		return -1;

	return 0;
}

void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_activate_slave(eth_dev, port_id);

	if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB) {
		internals->tlb_slaves_order[active_count] = port_id;
	}

	RTE_ASSERT(internals->active_slave_count <
			(RTE_DIM(internals->active_slaves) - 1));

	internals->active_slaves[internals->active_slave_count] = port_id;
	internals->active_slave_count++;

	if (internals->mode == BONDING_MODE_TLB)
		bond_tlb_activate_slave(internals);
	if (internals->mode == BONDING_MODE_ALB)
		bond_mode_alb_client_list_upd(eth_dev);
}

void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
	uint8_t slave_pos;
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t active_count = internals->active_slave_count;

	if (internals->mode == BONDING_MODE_8023AD) {
		bond_mode_8023ad_stop(eth_dev);
		bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
	} else if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB)
		bond_tlb_disable(internals);

	slave_pos = find_slave_by_id(internals->active_slaves, active_count,
			port_id);

	/* If the slave was not at the end of the list, shift the remaining
	 * active slaves up the active array */
	if (slave_pos < active_count) {
		active_count--;
		memmove(internals->active_slaves + slave_pos,
				internals->active_slaves + slave_pos + 1,
				(active_count - slave_pos) *
					sizeof(internals->active_slaves[0]));
	}

	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
	internals->active_slave_count = active_count;

	if (eth_dev->data->dev_started) {
		if (internals->mode == BONDING_MODE_8023AD) {
			bond_mode_8023ad_start(eth_dev);
		} else if (internals->mode == BONDING_MODE_TLB) {
			bond_tlb_enable(internals);
		} else if (internals->mode == BONDING_MODE_ALB) {
			bond_tlb_enable(internals);
			bond_mode_alb_client_list_upd(eth_dev);
		}
	}
}

uint8_t
number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}

	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}

int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	uint32_t vlan_filter_bmp_size;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	if (name == NULL) {
		RTE_BOND_LOG(ERR, "Invalid name specified");
		goto err;
	}

	if (socket_id >= number_of_sockets()) {
		RTE_BOND_LOG(ERR,
				"Invalid socket id specified to create bonded device on.");
		goto err;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
	if (internals == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
		goto err;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	eth_dev->data->dev_private = internals;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
			socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
		goto err;
	}

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
		RTE_ETH_DEV_DETACHABLE;
	eth_dev->driver = NULL;
	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->data->drv_name = pmd_bond_driver_name;
	eth_dev->data->numa_node = socket_id;

	rte_spinlock_init(&internals->lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->xmit_hash = xmit_l2_hash;
	internals->user_defined_mac = 0;
	internals->link_props_set = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;
	internals->candidate_max_rx_pktlen = 0;
	internals->max_rx_pktlen = 0;

	/* Initially allow to choose any offload type */
	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
				eth_dev->data->port_id, mode);
		goto err;
	}

	vlan_filter_bmp_size =
		rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
			RTE_CACHE_LINE_SIZE);
	if (internals->vlan_filter_bmpmem == NULL) {
		RTE_BOND_LOG(ERR,
			"Failed to allocate vlan bitmap for bonded device %u\n",
			eth_dev->data->port_id);
		goto err;
	}

	internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
	if (internals->vlan_filter_bmp == NULL) {
		RTE_BOND_LOG(ERR,
			"Failed to init vlan bitmap for bonded device %u\n",
			eth_dev->data->port_id);
		rte_free(internals->vlan_filter_bmpmem);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	rte_free(internals);
	if (eth_dev != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		rte_eth_dev_release_port(eth_dev);
	}
	return -1;
}
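
/*
 * Usage sketch (illustrative only, not part of this driver): an application
 * typically creates the bonded device, attaches slave ports and then starts
 * it like any other ethdev. The port ids 0 and 1 and the variable bond_port
 * are assumptions of this example.
 *
 *	int bond_port = rte_eth_bond_create("bond0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	if (bond_port < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to create bonded device\n");
 *
 *	if (rte_eth_bond_slave_add(bond_port, 0) != 0 ||
 *			rte_eth_bond_slave_add(bond_port, 1) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to add slave ports\n");
 *
 * Queues are then set up with rte_eth_dev_configure()/rte_eth_rx_queue_setup()/
 * rte_eth_tx_queue_setup(), followed by rte_eth_dev_start(bond_port).
 */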

int
rte_eth_bond_free(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct bond_dev_private *internals;

	/* now free all data allocation - for eth_dev structure,
	 * dummy pci driver and internal (private) data
	 */

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	internals = eth_dev->data->dev_private;
	if (internals->slave_count != 0)
		return -EBUSY;

	if (eth_dev->data->dev_started == 1) {
		bond_ethdev_stop(eth_dev);
		bond_ethdev_close(eth_dev);
	}

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_bitmap_free(internals->vlan_filter_bmp);
	rte_free(internals->vlan_filter_bmpmem);
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data->mac_addrs);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static int
slave_vlan_filter_set(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	int found;
	int res = 0;
	uint64_t slab = 0;
	uint32_t pos = 0;
	uint16_t first;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	if (bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter == 0)
		return 0;

	internals = bonded_eth_dev->data->dev_private;
	found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
	first = pos;

	if (!found)
		return 0;

	do {
		uint32_t i;
		uint64_t mask;

		for (i = 0, mask = 1;
		     i < RTE_BITMAP_SLAB_BIT_SIZE;
		     i++, mask <<= 1) {
			if (unlikely(slab & mask))
				res = rte_eth_dev_vlan_filter(slave_port_id,
						(uint16_t)pos, 1);
		}
		found = rte_bitmap_scan(internals->vlan_filter_bmp,
				&pos, &slab);
	} while (found && first != pos && res == 0);

	return res;
}

static int
__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link_props;
	struct rte_eth_dev_info dev_info;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	slave_eth_dev = &rte_eth_devices[slave_port_id];
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
		return -1;
	}

	/* Add slave details to bonded device */
	slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;

	rte_eth_dev_info_get(slave_port_id, &dev_info);
	if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
		RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
				slave_port_id);
		return -1;
	}

	slave_add(internals, slave_eth_dev);

	/* Store each slave's reta_size so RETA can be synchronized across all
	 * slave devices even if their sizes differ.
	 */
	internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;

	if (internals->slave_count < 1) {
		/* if MAC is not user defined then use the MAC of the first
		 * slave added to the bonded device */
		if (!internals->user_defined_mac)
			mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);

		/* Inherit eth dev link properties from first slave */
		link_properties_set(bonded_eth_dev,
				&(slave_eth_dev->data->dev_link));

		/* Make primary slave */
		internals->primary_port = slave_port_id;
		internals->current_primary_port = slave_port_id;

		/* Inherit queues settings from first slave */
		internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
		internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;

		internals->reta_size = dev_info.reta_size;

		/* Take the first dev's offload capabilities */
		internals->rx_offload_capa = dev_info.rx_offload_capa;
		internals->tx_offload_capa = dev_info.tx_offload_capa;
		internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;

		/* Inherit first slave's max rx packet size */
		internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;

	} else {
		internals->rx_offload_capa &= dev_info.rx_offload_capa;
		internals->tx_offload_capa &= dev_info.tx_offload_capa;
		internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;

		/* RETA size is the GCD of all slaves' RETA sizes; if all sizes
		 * are powers of two, the lowest one is the GCD.
		 */
		if (internals->reta_size > dev_info.reta_size)
			internals->reta_size = dev_info.reta_size;

		if (!internals->max_rx_pktlen &&
		    dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen)
			internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
	}

	bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
			internals->flow_type_rss_offloads;

	internals->slave_count++;

	/* Update all slave devices MACs */
	mac_address_slaves_update(bonded_eth_dev);

	if (bonded_eth_dev->data->dev_started) {
		if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
			slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
			RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
					slave_port_id);
			return -1;
		}
	}

	/* Register link status change callback with bonded device pointer as
	 * argument */
	rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);

	/* If the bonded device is started then we can add the slave to our
	 * active slave array */
	if (bonded_eth_dev->data->dev_started) {
		rte_eth_link_get_nowait(slave_port_id, &link_props);

		if (link_props.link_status == ETH_LINK_UP) {
			if (internals->active_slave_count == 0 &&
					!internals->user_defined_primary_port)
				bond_ethdev_primary_set(internals,
						slave_port_id);

			if (find_slave_by_id(internals->active_slaves,
					internals->active_slave_count,
					slave_port_id) == internals->active_slave_count)
				activate_slave(bonded_eth_dev, slave_port_id);
		}
	}

	slave_vlan_filter_set(bonded_port_id, slave_port_id);

	return 0;
}

int
rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	int retval;

	/* Verify that the port ids are valid bonded and slave ports */
	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}

static int
__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_dev *slave_eth_dev;
	int i, slave_idx;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	/* first remove from active slave list */
	slave_idx = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, slave_port_id);

	if (slave_idx < internals->active_slave_count)
		deactivate_slave(bonded_eth_dev, slave_port_id);

	slave_idx = -1;
	/* now find in slave list */
	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id == slave_port_id) {
			slave_idx = i;
			break;
		}

	if (slave_idx < 0) {
		RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
				internals->slave_count);
		return -1;
	}

	/* Un-register link status change callback with bonded device pointer as
	 * argument */
	rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback,
			&rte_eth_devices[bonded_port_id].data->port_id);

	/* Restore original MAC address of slave device */
	mac_address_set(&rte_eth_devices[slave_port_id],
			&(internals->slaves[slave_idx].persisted_mac_addr));

	slave_eth_dev = &rte_eth_devices[slave_port_id];
	slave_remove(internals, slave_eth_dev);
	slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);

	/* If the removed slave was the primary, promote the first slave in the
	 * active list; otherwise fall back to the first device in the slave list */
	if (internals->current_primary_port == slave_port_id) {
		if (internals->active_slave_count > 0)
			internals->current_primary_port = internals->active_slaves[0];
		else if (internals->slave_count > 0)
			internals->current_primary_port = internals->slaves[0].port_id;
		else
			internals->primary_port = 0;
	}

	if (internals->active_slave_count < 1) {
		/* reset device link properties as no slaves are active */
		link_properties_reset(&rte_eth_devices[bonded_port_id]);

		/* if no slaves remain attached to the bonded device and the MAC
		 * is not user defined then clear the MAC of the bonded device,
		 * as it will be reset when the next slave is added */
		if (internals->slave_count < 1 && !internals->user_defined_mac)
			memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
					sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
	}
	if (internals->slave_count == 0) {
		internals->rx_offload_capa = 0;
		internals->tx_offload_capa = 0;
		internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
		internals->reta_size = 0;
		internals->candidate_max_rx_pktlen = 0;
		internals->max_rx_pktlen = 0;
	}
	return 0;
}

int
rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	int retval;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}

int
rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
{
	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode);
}

int
rte_eth_bond_mode_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->mode;
}

int
rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	internals->user_defined_primary_port = 1;
	internals->primary_port = slave_port_id;

	bond_ethdev_primary_set(internals, slave_port_id);

	return 0;
}

int
rte_eth_bond_primary_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	if (internals->slave_count < 1)
		return -1;

	return internals->current_primary_port;
}

int
rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
{
	struct bond_dev_private *internals;
	uint8_t i;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	if (slaves == NULL)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	if (internals->slave_count > len)
		return -1;

	for (i = 0; i < internals->slave_count; i++)
		slaves[i] = internals->slaves[i].port_id;

	return internals->slave_count;
}

int
rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
		uint8_t len)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	if (slaves == NULL)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	if (internals->active_slave_count > len)
		return -1;

	memcpy(slaves, internals->active_slaves, internals->active_slave_count);

	return internals->active_slave_count;
}
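
/*
 * Usage sketch (illustrative only): querying the configured and active slave
 * sets of a bonded device. bond_port is assumed to be a valid bonded port id;
 * RTE_MAX_ETHPORTS-sized arrays are always large enough for every port id.
 *
 *	uint8_t slaves[RTE_MAX_ETHPORTS];
 *	uint8_t active[RTE_MAX_ETHPORTS];
 *	int n_slaves = rte_eth_bond_slaves_get(bond_port, slaves,
 *			RTE_MAX_ETHPORTS);
 *	int n_active = rte_eth_bond_active_slaves_get(bond_port, active,
 *			RTE_MAX_ETHPORTS);
 *	if (n_slaves < 0 || n_active < 0)
 *		RTE_LOG(ERR, PMD, "Not a valid bonded device\n");
 */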

int
rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
		struct ether_addr *mac_addr)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	/* Set MAC Address of Bonded Device */
	if (mac_address_set(bonded_eth_dev, mac_addr))
		return -1;

	internals->user_defined_mac = 1;

	/* Update all slave devices MACs */
	if (internals->slave_count > 0)
		return mac_address_slaves_update(bonded_eth_dev);

	return 0;
}

int
rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	internals->user_defined_mac = 0;

	if (internals->slave_count > 0) {
		/* Set MAC Address of Bonded Device */
		if (mac_address_set(bonded_eth_dev,
				&internals->slaves[internals->primary_port].persisted_mac_addr)
				!= 0) {
			RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
			return -1;
		}
		/* Update all slave devices MAC addresses */
		return mac_address_slaves_update(bonded_eth_dev);
	}
	/* No need to update anything as no slaves present */
	return 0;
}
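
/*
 * Usage sketch (illustrative only): overriding the bonded device's MAC
 * address and later reverting to the persisted address taken from the
 * primary slave. bond_port and the address value are assumptions of this
 * example.
 *
 *	struct ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *	if (rte_eth_bond_mac_address_set(bond_port, &addr) != 0)
 *		RTE_LOG(ERR, PMD, "Failed to set bonded MAC address\n");
 *
 *	rte_eth_bond_mac_address_reset(bond_port);
 */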

int
rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	switch (policy) {
	case BALANCE_XMIT_POLICY_LAYER2:
		internals->balance_xmit_policy = policy;
		internals->xmit_hash = xmit_l2_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER23:
		internals->balance_xmit_policy = policy;
		internals->xmit_hash = xmit_l23_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER34:
		internals->balance_xmit_policy = policy;
		internals->xmit_hash = xmit_l34_hash;
		break;

	default:
		return -1;
	}
	return 0;
}
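
/*
 * Usage sketch (illustrative only): in balance mode the transmit hash decides
 * how flows are spread over the slaves; the layer 3+4 policy hashes on IP
 * addresses and TCP/UDP ports rather than MAC addresses alone. bond_port is
 * an assumption of this example.
 *
 *	rte_eth_bond_mode_set(bond_port, BONDING_MODE_BALANCE);
 *	if (rte_eth_bond_xmit_policy_set(bond_port,
 *			BALANCE_XMIT_POLICY_LAYER34) != 0)
 *		RTE_LOG(ERR, PMD, "Failed to set transmit policy\n");
 */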

int
rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->balance_xmit_policy;
}

int
rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;
	internals->link_status_polling_interval_ms = internal_ms;

	return 0;
}

int
rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->link_status_polling_interval_ms;
}

int
rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;
	internals->link_down_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->link_down_delay_ms;
}

int
rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;
	internals->link_up_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonded_port_id].data->dev_private;

	return internals->link_up_delay_ms;
}
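
/*
 * Usage sketch (illustrative only): tuning link monitoring. Slaves whose PMDs
 * do not report link status change interrupts are polled every
 * link_status_polling_interval_ms, and the up/down delays defer propagation
 * of a slave's link state change. bond_port is an assumption of this example.
 *
 *	rte_eth_bond_link_monitoring_set(bond_port, 100);
 *	rte_eth_bond_link_up_prop_delay_set(bond_port, 500);
 *	rte_eth_bond_link_down_prop_delay_set(bond_port, 500);
 */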