/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_malloc.h>
#include <rte_ethdev.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
45 #define DEFAULT_POLLING_INTERVAL_10_MS (10)
/* Driver name assigned to every bonded ethdev; bonded devices are
 * recognised by comparing drv_name against this pointer. */
const char pmd_bond_driver_name[] = "rte_bond_pmd";
50 check_for_bonded_ethdev(const struct rte_eth_dev
*eth_dev
)
52 /* Check valid pointer */
53 if (eth_dev
->data
->drv_name
== NULL
)
56 /* return 0 if driver name matches */
57 return eth_dev
->data
->drv_name
!= pmd_bond_driver_name
;
61 valid_bonded_port_id(uint8_t port_id
)
63 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id
, -1);
64 return check_for_bonded_ethdev(&rte_eth_devices
[port_id
]);
68 valid_slave_port_id(uint8_t port_id
)
70 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id
, -1);
72 /* Verify that port_id refers to a non bonded port */
73 if (check_for_bonded_ethdev(&rte_eth_devices
[port_id
]) == 0)
80 activate_slave(struct rte_eth_dev
*eth_dev
, uint8_t port_id
)
82 struct bond_dev_private
*internals
= eth_dev
->data
->dev_private
;
83 uint8_t active_count
= internals
->active_slave_count
;
85 if (internals
->mode
== BONDING_MODE_8023AD
)
86 bond_mode_8023ad_activate_slave(eth_dev
, port_id
);
88 if (internals
->mode
== BONDING_MODE_TLB
89 || internals
->mode
== BONDING_MODE_ALB
) {
91 internals
->tlb_slaves_order
[active_count
] = port_id
;
94 RTE_ASSERT(internals
->active_slave_count
<
95 (RTE_DIM(internals
->active_slaves
) - 1));
97 internals
->active_slaves
[internals
->active_slave_count
] = port_id
;
98 internals
->active_slave_count
++;
100 if (internals
->mode
== BONDING_MODE_TLB
)
101 bond_tlb_activate_slave(internals
);
102 if (internals
->mode
== BONDING_MODE_ALB
)
103 bond_mode_alb_client_list_upd(eth_dev
);
107 deactivate_slave(struct rte_eth_dev
*eth_dev
, uint8_t port_id
)
110 struct bond_dev_private
*internals
= eth_dev
->data
->dev_private
;
111 uint8_t active_count
= internals
->active_slave_count
;
113 if (internals
->mode
== BONDING_MODE_8023AD
) {
114 bond_mode_8023ad_stop(eth_dev
);
115 bond_mode_8023ad_deactivate_slave(eth_dev
, port_id
);
116 } else if (internals
->mode
== BONDING_MODE_TLB
117 || internals
->mode
== BONDING_MODE_ALB
)
118 bond_tlb_disable(internals
);
120 slave_pos
= find_slave_by_id(internals
->active_slaves
, active_count
,
123 /* If slave was not at the end of the list
124 * shift active slaves up active array list */
125 if (slave_pos
< active_count
) {
127 memmove(internals
->active_slaves
+ slave_pos
,
128 internals
->active_slaves
+ slave_pos
+ 1,
129 (active_count
- slave_pos
) *
130 sizeof(internals
->active_slaves
[0]));
133 RTE_ASSERT(active_count
< RTE_DIM(internals
->active_slaves
));
134 internals
->active_slave_count
= active_count
;
136 if (eth_dev
->data
->dev_started
) {
137 if (internals
->mode
== BONDING_MODE_8023AD
) {
138 bond_mode_8023ad_start(eth_dev
);
139 } else if (internals
->mode
== BONDING_MODE_TLB
) {
140 bond_tlb_enable(internals
);
141 } else if (internals
->mode
== BONDING_MODE_ALB
) {
142 bond_tlb_enable(internals
);
143 bond_mode_alb_client_list_upd(eth_dev
);
149 number_of_sockets(void)
153 const struct rte_memseg
*ms
= rte_eal_get_physmem_layout();
155 for (i
= 0; ((i
< RTE_MAX_MEMSEG
) && (ms
[i
].addr
!= NULL
)); i
++) {
156 if (sockets
< ms
[i
].socket_id
)
157 sockets
= ms
[i
].socket_id
;
160 /* Number of sockets = maximum socket_id + 1 */
165 rte_eth_bond_create(const char *name
, uint8_t mode
, uint8_t socket_id
)
167 struct bond_dev_private
*internals
= NULL
;
168 struct rte_eth_dev
*eth_dev
= NULL
;
169 uint32_t vlan_filter_bmp_size
;
171 /* now do all data allocation - for eth_dev structure, dummy pci driver
172 * and internal (private) data
176 RTE_BOND_LOG(ERR
, "Invalid name specified");
180 if (socket_id
>= number_of_sockets()) {
182 "Invalid socket id specified to create bonded device on.");
186 internals
= rte_zmalloc_socket(name
, sizeof(*internals
), 0, socket_id
);
187 if (internals
== NULL
) {
188 RTE_BOND_LOG(ERR
, "Unable to malloc internals on socket");
192 /* reserve an ethdev entry */
193 eth_dev
= rte_eth_dev_allocate(name
);
194 if (eth_dev
== NULL
) {
195 RTE_BOND_LOG(ERR
, "Unable to allocate rte_eth_dev");
199 eth_dev
->data
->dev_private
= internals
;
200 eth_dev
->data
->nb_rx_queues
= (uint16_t)1;
201 eth_dev
->data
->nb_tx_queues
= (uint16_t)1;
203 TAILQ_INIT(&(eth_dev
->link_intr_cbs
));
205 eth_dev
->data
->dev_link
.link_status
= ETH_LINK_DOWN
;
207 eth_dev
->data
->mac_addrs
= rte_zmalloc_socket(name
, ETHER_ADDR_LEN
, 0,
209 if (eth_dev
->data
->mac_addrs
== NULL
) {
210 RTE_BOND_LOG(ERR
, "Unable to malloc mac_addrs");
214 eth_dev
->data
->dev_started
= 0;
215 eth_dev
->data
->promiscuous
= 0;
216 eth_dev
->data
->scattered_rx
= 0;
217 eth_dev
->data
->all_multicast
= 0;
219 eth_dev
->dev_ops
= &default_dev_ops
;
220 eth_dev
->data
->dev_flags
= RTE_ETH_DEV_INTR_LSC
|
221 RTE_ETH_DEV_DETACHABLE
;
222 eth_dev
->driver
= NULL
;
223 eth_dev
->data
->kdrv
= RTE_KDRV_NONE
;
224 eth_dev
->data
->drv_name
= pmd_bond_driver_name
;
225 eth_dev
->data
->numa_node
= socket_id
;
227 rte_spinlock_init(&internals
->lock
);
229 internals
->port_id
= eth_dev
->data
->port_id
;
230 internals
->mode
= BONDING_MODE_INVALID
;
231 internals
->current_primary_port
= RTE_MAX_ETHPORTS
+ 1;
232 internals
->balance_xmit_policy
= BALANCE_XMIT_POLICY_LAYER2
;
233 internals
->xmit_hash
= xmit_l2_hash
;
234 internals
->user_defined_mac
= 0;
235 internals
->link_props_set
= 0;
237 internals
->link_status_polling_enabled
= 0;
239 internals
->link_status_polling_interval_ms
= DEFAULT_POLLING_INTERVAL_10_MS
;
240 internals
->link_down_delay_ms
= 0;
241 internals
->link_up_delay_ms
= 0;
243 internals
->slave_count
= 0;
244 internals
->active_slave_count
= 0;
245 internals
->rx_offload_capa
= 0;
246 internals
->tx_offload_capa
= 0;
247 internals
->candidate_max_rx_pktlen
= 0;
248 internals
->max_rx_pktlen
= 0;
250 /* Initially allow to choose any offload type */
251 internals
->flow_type_rss_offloads
= ETH_RSS_PROTO_MASK
;
253 memset(internals
->active_slaves
, 0, sizeof(internals
->active_slaves
));
254 memset(internals
->slaves
, 0, sizeof(internals
->slaves
));
256 /* Set mode 4 default configuration */
257 bond_mode_8023ad_setup(eth_dev
, NULL
);
258 if (bond_ethdev_mode_set(eth_dev
, mode
)) {
259 RTE_BOND_LOG(ERR
, "Failed to set bonded device %d mode too %d",
260 eth_dev
->data
->port_id
, mode
);
264 vlan_filter_bmp_size
=
265 rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID
+ 1);
266 internals
->vlan_filter_bmpmem
= rte_malloc(name
, vlan_filter_bmp_size
,
267 RTE_CACHE_LINE_SIZE
);
268 if (internals
->vlan_filter_bmpmem
== NULL
) {
270 "Failed to allocate vlan bitmap for bonded device %u\n",
271 eth_dev
->data
->port_id
);
275 internals
->vlan_filter_bmp
= rte_bitmap_init(ETHER_MAX_VLAN_ID
+ 1,
276 internals
->vlan_filter_bmpmem
, vlan_filter_bmp_size
);
277 if (internals
->vlan_filter_bmp
== NULL
) {
279 "Failed to init vlan bitmap for bonded device %u\n",
280 eth_dev
->data
->port_id
);
281 rte_free(internals
->vlan_filter_bmpmem
);
285 return eth_dev
->data
->port_id
;
289 if (eth_dev
!= NULL
) {
290 rte_free(eth_dev
->data
->mac_addrs
);
291 rte_eth_dev_release_port(eth_dev
);
297 rte_eth_bond_free(const char *name
)
299 struct rte_eth_dev
*eth_dev
= NULL
;
300 struct bond_dev_private
*internals
;
302 /* now free all data allocation - for eth_dev structure,
303 * dummy pci driver and internal (private) data
306 /* find an ethdev entry */
307 eth_dev
= rte_eth_dev_allocated(name
);
311 internals
= eth_dev
->data
->dev_private
;
312 if (internals
->slave_count
!= 0)
315 if (eth_dev
->data
->dev_started
== 1) {
316 bond_ethdev_stop(eth_dev
);
317 bond_ethdev_close(eth_dev
);
320 eth_dev
->dev_ops
= NULL
;
321 eth_dev
->rx_pkt_burst
= NULL
;
322 eth_dev
->tx_pkt_burst
= NULL
;
324 internals
= eth_dev
->data
->dev_private
;
325 rte_bitmap_free(internals
->vlan_filter_bmp
);
326 rte_free(internals
->vlan_filter_bmpmem
);
327 rte_free(eth_dev
->data
->dev_private
);
328 rte_free(eth_dev
->data
->mac_addrs
);
330 rte_eth_dev_release_port(eth_dev
);
336 slave_vlan_filter_set(uint8_t bonded_port_id
, uint8_t slave_port_id
)
338 struct rte_eth_dev
*bonded_eth_dev
;
339 struct bond_dev_private
*internals
;
346 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
347 if (bonded_eth_dev
->data
->dev_conf
.rxmode
.hw_vlan_filter
== 0)
350 internals
= bonded_eth_dev
->data
->dev_private
;
351 found
= rte_bitmap_scan(internals
->vlan_filter_bmp
, &pos
, &slab
);
361 for (i
= 0, mask
= 1;
362 i
< RTE_BITMAP_SLAB_BIT_SIZE
;
364 if (unlikely(slab
& mask
))
365 res
= rte_eth_dev_vlan_filter(slave_port_id
,
368 found
= rte_bitmap_scan(internals
->vlan_filter_bmp
,
370 } while (found
&& first
!= pos
&& res
== 0);
376 __eth_bond_slave_add_lock_free(uint8_t bonded_port_id
, uint8_t slave_port_id
)
378 struct rte_eth_dev
*bonded_eth_dev
, *slave_eth_dev
;
379 struct bond_dev_private
*internals
;
380 struct rte_eth_link link_props
;
381 struct rte_eth_dev_info dev_info
;
383 if (valid_slave_port_id(slave_port_id
) != 0)
386 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
387 internals
= bonded_eth_dev
->data
->dev_private
;
389 slave_eth_dev
= &rte_eth_devices
[slave_port_id
];
390 if (slave_eth_dev
->data
->dev_flags
& RTE_ETH_DEV_BONDED_SLAVE
) {
391 RTE_BOND_LOG(ERR
, "Slave device is already a slave of a bonded device");
395 /* Add slave details to bonded device */
396 slave_eth_dev
->data
->dev_flags
|= RTE_ETH_DEV_BONDED_SLAVE
;
398 rte_eth_dev_info_get(slave_port_id
, &dev_info
);
399 if (dev_info
.max_rx_pktlen
< internals
->max_rx_pktlen
) {
400 RTE_BOND_LOG(ERR
, "Slave (port %u) max_rx_pktlen too small",
405 slave_add(internals
, slave_eth_dev
);
407 /* We need to store slaves reta_size to be able to synchronize RETA for all
408 * slave devices even if its sizes are different.
410 internals
->slaves
[internals
->slave_count
].reta_size
= dev_info
.reta_size
;
412 if (internals
->slave_count
< 1) {
413 /* if MAC is not user defined then use MAC of first slave add to
415 if (!internals
->user_defined_mac
)
416 mac_address_set(bonded_eth_dev
, slave_eth_dev
->data
->mac_addrs
);
418 /* Inherit eth dev link properties from first slave */
419 link_properties_set(bonded_eth_dev
,
420 &(slave_eth_dev
->data
->dev_link
));
422 /* Make primary slave */
423 internals
->primary_port
= slave_port_id
;
424 internals
->current_primary_port
= slave_port_id
;
426 /* Inherit queues settings from first slave */
427 internals
->nb_rx_queues
= slave_eth_dev
->data
->nb_rx_queues
;
428 internals
->nb_tx_queues
= slave_eth_dev
->data
->nb_tx_queues
;
430 internals
->reta_size
= dev_info
.reta_size
;
432 /* Take the first dev's offload capabilities */
433 internals
->rx_offload_capa
= dev_info
.rx_offload_capa
;
434 internals
->tx_offload_capa
= dev_info
.tx_offload_capa
;
435 internals
->flow_type_rss_offloads
= dev_info
.flow_type_rss_offloads
;
437 /* Inherit first slave's max rx packet size */
438 internals
->candidate_max_rx_pktlen
= dev_info
.max_rx_pktlen
;
441 internals
->rx_offload_capa
&= dev_info
.rx_offload_capa
;
442 internals
->tx_offload_capa
&= dev_info
.tx_offload_capa
;
443 internals
->flow_type_rss_offloads
&= dev_info
.flow_type_rss_offloads
;
445 /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
446 * the power of 2, the lower one is GCD
448 if (internals
->reta_size
> dev_info
.reta_size
)
449 internals
->reta_size
= dev_info
.reta_size
;
451 if (!internals
->max_rx_pktlen
&&
452 dev_info
.max_rx_pktlen
< internals
->candidate_max_rx_pktlen
)
453 internals
->candidate_max_rx_pktlen
= dev_info
.max_rx_pktlen
;
456 bonded_eth_dev
->data
->dev_conf
.rx_adv_conf
.rss_conf
.rss_hf
&=
457 internals
->flow_type_rss_offloads
;
459 internals
->slave_count
++;
461 /* Update all slave devices MACs*/
462 mac_address_slaves_update(bonded_eth_dev
);
464 if (bonded_eth_dev
->data
->dev_started
) {
465 if (slave_configure(bonded_eth_dev
, slave_eth_dev
) != 0) {
466 slave_eth_dev
->data
->dev_flags
&= (~RTE_ETH_DEV_BONDED_SLAVE
);
467 RTE_BOND_LOG(ERR
, "rte_bond_slaves_configure: port=%d",
473 /* Register link status change callback with bonded device pointer as
475 rte_eth_dev_callback_register(slave_port_id
, RTE_ETH_EVENT_INTR_LSC
,
476 bond_ethdev_lsc_event_callback
, &bonded_eth_dev
->data
->port_id
);
478 /* If bonded device is started then we can add the slave to our active
480 if (bonded_eth_dev
->data
->dev_started
) {
481 rte_eth_link_get_nowait(slave_port_id
, &link_props
);
483 if (link_props
.link_status
== ETH_LINK_UP
) {
484 if (internals
->active_slave_count
== 0 &&
485 !internals
->user_defined_primary_port
)
486 bond_ethdev_primary_set(internals
,
489 if (find_slave_by_id(internals
->active_slaves
,
490 internals
->active_slave_count
,
491 slave_port_id
) == internals
->active_slave_count
)
492 activate_slave(bonded_eth_dev
, slave_port_id
);
496 slave_vlan_filter_set(bonded_port_id
, slave_port_id
);
503 rte_eth_bond_slave_add(uint8_t bonded_port_id
, uint8_t slave_port_id
)
505 struct rte_eth_dev
*bonded_eth_dev
;
506 struct bond_dev_private
*internals
;
510 /* Verify that port id's are valid bonded and slave ports */
511 if (valid_bonded_port_id(bonded_port_id
) != 0)
514 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
515 internals
= bonded_eth_dev
->data
->dev_private
;
517 rte_spinlock_lock(&internals
->lock
);
519 retval
= __eth_bond_slave_add_lock_free(bonded_port_id
, slave_port_id
);
521 rte_spinlock_unlock(&internals
->lock
);
527 __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id
, uint8_t slave_port_id
)
529 struct rte_eth_dev
*bonded_eth_dev
;
530 struct bond_dev_private
*internals
;
531 struct rte_eth_dev
*slave_eth_dev
;
534 if (valid_slave_port_id(slave_port_id
) != 0)
537 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
538 internals
= bonded_eth_dev
->data
->dev_private
;
540 /* first remove from active slave list */
541 slave_idx
= find_slave_by_id(internals
->active_slaves
,
542 internals
->active_slave_count
, slave_port_id
);
544 if (slave_idx
< internals
->active_slave_count
)
545 deactivate_slave(bonded_eth_dev
, slave_port_id
);
548 /* now find in slave list */
549 for (i
= 0; i
< internals
->slave_count
; i
++)
550 if (internals
->slaves
[i
].port_id
== slave_port_id
) {
556 RTE_BOND_LOG(ERR
, "Couldn't find slave in port list, slave count %d",
557 internals
->slave_count
);
561 /* Un-register link status change callback with bonded device pointer as
563 rte_eth_dev_callback_unregister(slave_port_id
, RTE_ETH_EVENT_INTR_LSC
,
564 bond_ethdev_lsc_event_callback
,
565 &rte_eth_devices
[bonded_port_id
].data
->port_id
);
567 /* Restore original MAC address of slave device */
568 mac_address_set(&rte_eth_devices
[slave_port_id
],
569 &(internals
->slaves
[slave_idx
].persisted_mac_addr
));
571 slave_eth_dev
= &rte_eth_devices
[slave_port_id
];
572 slave_remove(internals
, slave_eth_dev
);
573 slave_eth_dev
->data
->dev_flags
&= (~RTE_ETH_DEV_BONDED_SLAVE
);
575 /* first slave in the active list will be the primary by default,
576 * otherwise use first device in list */
577 if (internals
->current_primary_port
== slave_port_id
) {
578 if (internals
->active_slave_count
> 0)
579 internals
->current_primary_port
= internals
->active_slaves
[0];
580 else if (internals
->slave_count
> 0)
581 internals
->current_primary_port
= internals
->slaves
[0].port_id
;
583 internals
->primary_port
= 0;
586 if (internals
->active_slave_count
< 1) {
587 /* reset device link properties as no slaves are active */
588 link_properties_reset(&rte_eth_devices
[bonded_port_id
]);
590 /* if no slaves are any longer attached to bonded device and MAC is not
591 * user defined then clear MAC of bonded device as it will be reset
592 * when a new slave is added */
593 if (internals
->slave_count
< 1 && !internals
->user_defined_mac
)
594 memset(rte_eth_devices
[bonded_port_id
].data
->mac_addrs
, 0,
595 sizeof(*(rte_eth_devices
[bonded_port_id
].data
->mac_addrs
)));
597 if (internals
->slave_count
== 0) {
598 internals
->rx_offload_capa
= 0;
599 internals
->tx_offload_capa
= 0;
600 internals
->flow_type_rss_offloads
= ETH_RSS_PROTO_MASK
;
601 internals
->reta_size
= 0;
602 internals
->candidate_max_rx_pktlen
= 0;
603 internals
->max_rx_pktlen
= 0;
609 rte_eth_bond_slave_remove(uint8_t bonded_port_id
, uint8_t slave_port_id
)
611 struct rte_eth_dev
*bonded_eth_dev
;
612 struct bond_dev_private
*internals
;
615 if (valid_bonded_port_id(bonded_port_id
) != 0)
618 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
619 internals
= bonded_eth_dev
->data
->dev_private
;
621 rte_spinlock_lock(&internals
->lock
);
623 retval
= __eth_bond_slave_remove_lock_free(bonded_port_id
, slave_port_id
);
625 rte_spinlock_unlock(&internals
->lock
);
631 rte_eth_bond_mode_set(uint8_t bonded_port_id
, uint8_t mode
)
633 if (valid_bonded_port_id(bonded_port_id
) != 0)
636 return bond_ethdev_mode_set(&rte_eth_devices
[bonded_port_id
], mode
);
640 rte_eth_bond_mode_get(uint8_t bonded_port_id
)
642 struct bond_dev_private
*internals
;
644 if (valid_bonded_port_id(bonded_port_id
) != 0)
647 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
649 return internals
->mode
;
653 rte_eth_bond_primary_set(uint8_t bonded_port_id
, uint8_t slave_port_id
)
655 struct bond_dev_private
*internals
;
657 if (valid_bonded_port_id(bonded_port_id
) != 0)
660 if (valid_slave_port_id(slave_port_id
) != 0)
663 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
665 internals
->user_defined_primary_port
= 1;
666 internals
->primary_port
= slave_port_id
;
668 bond_ethdev_primary_set(internals
, slave_port_id
);
674 rte_eth_bond_primary_get(uint8_t bonded_port_id
)
676 struct bond_dev_private
*internals
;
678 if (valid_bonded_port_id(bonded_port_id
) != 0)
681 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
683 if (internals
->slave_count
< 1)
686 return internals
->current_primary_port
;
690 rte_eth_bond_slaves_get(uint8_t bonded_port_id
, uint8_t slaves
[], uint8_t len
)
692 struct bond_dev_private
*internals
;
695 if (valid_bonded_port_id(bonded_port_id
) != 0)
701 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
703 if (internals
->slave_count
> len
)
706 for (i
= 0; i
< internals
->slave_count
; i
++)
707 slaves
[i
] = internals
->slaves
[i
].port_id
;
709 return internals
->slave_count
;
713 rte_eth_bond_active_slaves_get(uint8_t bonded_port_id
, uint8_t slaves
[],
716 struct bond_dev_private
*internals
;
718 if (valid_bonded_port_id(bonded_port_id
) != 0)
724 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
726 if (internals
->active_slave_count
> len
)
729 memcpy(slaves
, internals
->active_slaves
, internals
->active_slave_count
);
731 return internals
->active_slave_count
;
735 rte_eth_bond_mac_address_set(uint8_t bonded_port_id
,
736 struct ether_addr
*mac_addr
)
738 struct rte_eth_dev
*bonded_eth_dev
;
739 struct bond_dev_private
*internals
;
741 if (valid_bonded_port_id(bonded_port_id
) != 0)
744 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
745 internals
= bonded_eth_dev
->data
->dev_private
;
747 /* Set MAC Address of Bonded Device */
748 if (mac_address_set(bonded_eth_dev
, mac_addr
))
751 internals
->user_defined_mac
= 1;
753 /* Update all slave devices MACs*/
754 if (internals
->slave_count
> 0)
755 return mac_address_slaves_update(bonded_eth_dev
);
761 rte_eth_bond_mac_address_reset(uint8_t bonded_port_id
)
763 struct rte_eth_dev
*bonded_eth_dev
;
764 struct bond_dev_private
*internals
;
766 if (valid_bonded_port_id(bonded_port_id
) != 0)
769 bonded_eth_dev
= &rte_eth_devices
[bonded_port_id
];
770 internals
= bonded_eth_dev
->data
->dev_private
;
772 internals
->user_defined_mac
= 0;
774 if (internals
->slave_count
> 0) {
775 /* Set MAC Address of Bonded Device */
776 if (mac_address_set(bonded_eth_dev
,
777 &internals
->slaves
[internals
->primary_port
].persisted_mac_addr
)
779 RTE_BOND_LOG(ERR
, "Failed to set MAC address on bonded device");
782 /* Update all slave devices MAC addresses */
783 return mac_address_slaves_update(bonded_eth_dev
);
785 /* No need to update anything as no slaves present */
790 rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id
, uint8_t policy
)
792 struct bond_dev_private
*internals
;
794 if (valid_bonded_port_id(bonded_port_id
) != 0)
797 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
800 case BALANCE_XMIT_POLICY_LAYER2
:
801 internals
->balance_xmit_policy
= policy
;
802 internals
->xmit_hash
= xmit_l2_hash
;
804 case BALANCE_XMIT_POLICY_LAYER23
:
805 internals
->balance_xmit_policy
= policy
;
806 internals
->xmit_hash
= xmit_l23_hash
;
808 case BALANCE_XMIT_POLICY_LAYER34
:
809 internals
->balance_xmit_policy
= policy
;
810 internals
->xmit_hash
= xmit_l34_hash
;
820 rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id
)
822 struct bond_dev_private
*internals
;
824 if (valid_bonded_port_id(bonded_port_id
) != 0)
827 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
829 return internals
->balance_xmit_policy
;
833 rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id
, uint32_t internal_ms
)
835 struct bond_dev_private
*internals
;
837 if (valid_bonded_port_id(bonded_port_id
) != 0)
840 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
841 internals
->link_status_polling_interval_ms
= internal_ms
;
847 rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id
)
849 struct bond_dev_private
*internals
;
851 if (valid_bonded_port_id(bonded_port_id
) != 0)
854 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
856 return internals
->link_status_polling_interval_ms
;
860 rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id
, uint32_t delay_ms
)
863 struct bond_dev_private
*internals
;
865 if (valid_bonded_port_id(bonded_port_id
) != 0)
868 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
869 internals
->link_down_delay_ms
= delay_ms
;
875 rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id
)
877 struct bond_dev_private
*internals
;
879 if (valid_bonded_port_id(bonded_port_id
) != 0)
882 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
884 return internals
->link_down_delay_ms
;
888 rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id
, uint32_t delay_ms
)
891 struct bond_dev_private
*internals
;
893 if (valid_bonded_port_id(bonded_port_id
) != 0)
896 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
897 internals
->link_up_delay_ms
= delay_ms
;
903 rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id
)
905 struct bond_dev_private
*internals
;
907 if (valid_bonded_port_id(bonded_port_id
) != 0)
910 internals
= rte_eth_devices
[bonded_port_id
].data
->dev_private
;
912 return internals
->link_up_delay_ms
;