/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <rte_flow_driver.h>
#include <rte_cycles.h>

#include "failsafe_private.h"
14 /** Print a message out of a flow error. */
16 fs_flow_complain(struct rte_flow_error
*error
)
18 static const char *const errstrlist
[] = {
19 [RTE_FLOW_ERROR_TYPE_NONE
] = "no error",
20 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED
] = "cause unspecified",
21 [RTE_FLOW_ERROR_TYPE_HANDLE
] = "flow rule (handle)",
22 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP
] = "group field",
23 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY
] = "priority field",
24 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS
] = "ingress field",
25 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS
] = "egress field",
26 [RTE_FLOW_ERROR_TYPE_ATTR
] = "attributes structure",
27 [RTE_FLOW_ERROR_TYPE_ITEM_NUM
] = "pattern length",
28 [RTE_FLOW_ERROR_TYPE_ITEM
] = "specific pattern item",
29 [RTE_FLOW_ERROR_TYPE_ACTION_NUM
] = "number of actions",
30 [RTE_FLOW_ERROR_TYPE_ACTION
] = "specific action",
36 if ((unsigned int)error
->type
>= RTE_DIM(errstrlist
) ||
37 !errstrlist
[error
->type
])
38 errstr
= "unknown type";
40 errstr
= errstrlist
[error
->type
];
41 ERROR("Caught error type %d (%s): %s%s\n",
43 error
->cause
? (snprintf(buf
, sizeof(buf
), "cause: %p, ",
44 error
->cause
), buf
) : "",
45 error
->message
? error
->message
: "(no stated reason)");
50 eth_dev_flow_isolate_set(struct rte_eth_dev
*dev
,
51 struct sub_device
*sdev
)
53 struct rte_flow_error ferror
;
56 if (!PRIV(dev
)->flow_isolated
) {
57 DEBUG("Flow isolation already disabled");
59 DEBUG("Enabling flow isolation");
60 ret
= rte_flow_isolate(PORT_ID(sdev
),
61 PRIV(dev
)->flow_isolated
,
64 fs_flow_complain(&ferror
);
72 fs_eth_dev_conf_apply(struct rte_eth_dev
*dev
,
73 struct sub_device
*sdev
)
75 struct rte_eth_dev
*edev
;
76 struct rte_vlan_filter_conf
*vfc1
;
77 struct rte_vlan_filter_conf
*vfc2
;
78 struct rte_flow
*flow
;
79 struct rte_flow_error ferror
;
85 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
88 rxq
= dev
->data
->rx_queues
[i
];
89 ret
= rte_eth_rx_queue_setup(PORT_ID(sdev
), i
,
90 rxq
->info
.nb_desc
, rxq
->socket_id
,
91 &rxq
->info
.conf
, rxq
->info
.mp
);
93 ERROR("rx_queue_setup failed");
98 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++) {
101 txq
= dev
->data
->tx_queues
[i
];
102 ret
= rte_eth_tx_queue_setup(PORT_ID(sdev
), i
,
103 txq
->info
.nb_desc
, txq
->socket_id
,
106 ERROR("tx_queue_setup failed");
110 /* dev_link.link_status */
111 if (dev
->data
->dev_link
.link_status
!=
112 edev
->data
->dev_link
.link_status
) {
113 DEBUG("Configuring link_status");
114 if (dev
->data
->dev_link
.link_status
)
115 ret
= rte_eth_dev_set_link_up(PORT_ID(sdev
));
117 ret
= rte_eth_dev_set_link_down(PORT_ID(sdev
));
119 ERROR("Failed to apply link_status");
123 DEBUG("link_status already set");
126 if (dev
->data
->promiscuous
!= edev
->data
->promiscuous
) {
127 DEBUG("Configuring promiscuous");
128 if (dev
->data
->promiscuous
)
129 rte_eth_promiscuous_enable(PORT_ID(sdev
));
131 rte_eth_promiscuous_disable(PORT_ID(sdev
));
133 DEBUG("promiscuous already set");
136 if (dev
->data
->all_multicast
!= edev
->data
->all_multicast
) {
137 DEBUG("Configuring all_multicast");
138 if (dev
->data
->all_multicast
)
139 rte_eth_allmulticast_enable(PORT_ID(sdev
));
141 rte_eth_allmulticast_disable(PORT_ID(sdev
));
143 DEBUG("all_multicast already set");
146 if (dev
->data
->mtu
!= edev
->data
->mtu
) {
147 DEBUG("Configuring MTU");
148 ret
= rte_eth_dev_set_mtu(PORT_ID(sdev
), dev
->data
->mtu
);
150 ERROR("Failed to apply MTU");
154 DEBUG("MTU already set");
157 DEBUG("Configuring default MAC address");
158 ret
= rte_eth_dev_default_mac_addr_set(PORT_ID(sdev
),
159 &dev
->data
->mac_addrs
[0]);
161 ERROR("Setting default MAC address failed");
165 if (PRIV(dev
)->nb_mac_addr
> 1)
166 DEBUG("Configure additional MAC address%s",
167 (PRIV(dev
)->nb_mac_addr
> 2 ? "es" : ""));
168 for (i
= 1; i
< PRIV(dev
)->nb_mac_addr
; i
++) {
169 struct ether_addr
*ea
;
171 ea
= &dev
->data
->mac_addrs
[i
];
172 ret
= rte_eth_dev_mac_addr_add(PORT_ID(sdev
), ea
,
173 PRIV(dev
)->mac_addr_pool
[i
]);
175 char ea_fmt
[ETHER_ADDR_FMT_SIZE
];
177 ether_format_addr(ea_fmt
, ETHER_ADDR_FMT_SIZE
, ea
);
178 ERROR("Adding MAC address %s failed", ea_fmt
);
183 vfc1
= &dev
->data
->vlan_filter_conf
;
184 vfc2
= &edev
->data
->vlan_filter_conf
;
185 if (memcmp(vfc1
, vfc2
, sizeof(struct rte_vlan_filter_conf
))) {
191 DEBUG("Configuring VLAN filter");
192 for (i
= 0; i
< RTE_DIM(vfc1
->ids
); i
++) {
193 if (vfc1
->ids
[i
] == 0)
198 /* count trailing zeroes */
199 vbit
= ~ids
& (ids
- 1);
200 /* clear least significant bit set */
201 ids
^= (ids
^ (ids
- 1)) ^ vbit
;
202 for (; vbit
; vlan_id
++)
204 ret
= rte_eth_dev_vlan_filter(
205 PORT_ID(sdev
), vlan_id
, 1);
207 ERROR("Failed to apply VLAN filter %hu",
214 DEBUG("VLAN filter already set");
217 if (TAILQ_EMPTY(&PRIV(dev
)->flow_list
)) {
218 DEBUG("rte_flow already set");
220 DEBUG("Resetting rte_flow configuration");
221 ret
= rte_flow_flush(PORT_ID(sdev
), &ferror
);
223 fs_flow_complain(&ferror
);
228 DEBUG("Configuring rte_flow");
229 TAILQ_FOREACH(flow
, &PRIV(dev
)->flow_list
, next
) {
230 DEBUG("Creating flow #%" PRIu32
, i
++);
231 flow
->flows
[SUB_ID(sdev
)] =
232 rte_flow_create(PORT_ID(sdev
),
242 fs_flow_complain(&ferror
);
250 fs_dev_remove(struct sub_device
*sdev
)
256 switch (sdev
->state
) {
258 failsafe_rx_intr_uninstall_subdevice(sdev
);
259 rte_eth_dev_stop(PORT_ID(sdev
));
260 sdev
->state
= DEV_ACTIVE
;
263 failsafe_eth_dev_unregister_callbacks(sdev
);
264 rte_eth_dev_close(PORT_ID(sdev
));
265 sdev
->state
= DEV_PROBED
;
268 ret
= rte_eal_hotplug_remove(sdev
->bus
->name
,
271 ERROR("Bus detach failed for sub_device %u",
274 rte_eth_dev_release_port(ETH(sdev
));
276 sdev
->state
= DEV_PARSED
;
280 sdev
->state
= DEV_UNDEFINED
;
285 failsafe_hotplug_alarm_install(sdev
->fs_dev
);
289 fs_dev_stats_save(struct sub_device
*sdev
)
291 struct rte_eth_stats stats
;
294 /* Attempt to read current stats. */
295 err
= rte_eth_stats_get(PORT_ID(sdev
), &stats
);
297 uint64_t timestamp
= sdev
->stats_snapshot
.timestamp
;
299 WARN("Could not access latest statistics from sub-device %d.\n",
302 WARN("Using latest snapshot taken before %"PRIu64
" seconds.\n",
303 (rte_rdtsc() - timestamp
) / rte_get_tsc_hz());
305 failsafe_stats_increment(&PRIV(sdev
->fs_dev
)->stats_accumulator
,
306 err
? &sdev
->stats_snapshot
.stats
: &stats
);
307 memset(&sdev
->stats_snapshot
, 0, sizeof(sdev
->stats_snapshot
));
311 fs_rxtx_clean(struct sub_device
*sdev
)
315 for (i
= 0; i
< ETH(sdev
)->data
->nb_rx_queues
; i
++)
316 if (FS_ATOMIC_RX(sdev
, i
))
318 for (i
= 0; i
< ETH(sdev
)->data
->nb_tx_queues
; i
++)
319 if (FS_ATOMIC_TX(sdev
, i
))
325 failsafe_eth_dev_unregister_callbacks(struct sub_device
*sdev
)
331 if (sdev
->rmv_callback
) {
332 ret
= rte_eth_dev_callback_unregister(PORT_ID(sdev
),
333 RTE_ETH_EVENT_INTR_RMV
,
334 failsafe_eth_rmv_event_callback
,
337 WARN("Failed to unregister RMV callback for sub_device"
338 " %d", SUB_ID(sdev
));
339 sdev
->rmv_callback
= 0;
341 if (sdev
->lsc_callback
) {
342 ret
= rte_eth_dev_callback_unregister(PORT_ID(sdev
),
343 RTE_ETH_EVENT_INTR_LSC
,
344 failsafe_eth_lsc_event_callback
,
347 WARN("Failed to unregister LSC callback for sub_device"
348 " %d", SUB_ID(sdev
));
349 sdev
->lsc_callback
= 0;
354 failsafe_dev_remove(struct rte_eth_dev
*dev
)
356 struct sub_device
*sdev
;
359 FOREACH_SUBDEV_STATE(sdev
, i
, dev
, DEV_ACTIVE
)
360 if (sdev
->remove
&& fs_rxtx_clean(sdev
)) {
361 if (fs_lock(dev
, 1) != 0)
363 fs_dev_stats_save(sdev
);
370 failsafe_eth_dev_state_sync(struct rte_eth_dev
*dev
)
372 struct sub_device
*sdev
;
377 if (PRIV(dev
)->state
< DEV_PARSED
)
380 ret
= failsafe_args_parse_subs(dev
);
384 if (PRIV(dev
)->state
< DEV_PROBED
)
386 ret
= failsafe_eal_init(dev
);
389 if (PRIV(dev
)->state
< DEV_ACTIVE
)
392 FOREACH_SUBDEV(sdev
, i
, dev
) {
393 if (sdev
->state
== DEV_PROBED
) {
394 inactive
|= UINT32_C(1) << i
;
395 ret
= eth_dev_flow_isolate_set(dev
, sdev
);
397 ERROR("Could not apply configuration to sub_device %d",
403 ret
= dev
->dev_ops
->dev_configure(dev
);
406 FOREACH_SUBDEV(sdev
, i
, dev
) {
407 if (inactive
& (UINT32_C(1) << i
)) {
408 ret
= fs_eth_dev_conf_apply(dev
, sdev
);
410 ERROR("Could not apply configuration to sub_device %d",
417 * If new devices have been configured, check if
418 * the link state has changed.
421 dev
->dev_ops
->link_update(dev
, 1);
422 if (PRIV(dev
)->state
< DEV_STARTED
)
424 ret
= dev
->dev_ops
->dev_start(dev
);
429 FOREACH_SUBDEV(sdev
, i
, dev
)
430 if (sdev
->state
!= PRIV(dev
)->state
)
436 failsafe_stats_increment(struct rte_eth_stats
*to
, struct rte_eth_stats
*from
)
440 RTE_ASSERT(to
!= NULL
&& from
!= NULL
);
441 to
->ipackets
+= from
->ipackets
;
442 to
->opackets
+= from
->opackets
;
443 to
->ibytes
+= from
->ibytes
;
444 to
->obytes
+= from
->obytes
;
445 to
->imissed
+= from
->imissed
;
446 to
->ierrors
+= from
->ierrors
;
447 to
->oerrors
+= from
->oerrors
;
448 to
->rx_nombuf
+= from
->rx_nombuf
;
449 for (i
= 0; i
< RTE_ETHDEV_QUEUE_STAT_CNTRS
; i
++) {
450 to
->q_ipackets
[i
] += from
->q_ipackets
[i
];
451 to
->q_opackets
[i
] += from
->q_opackets
[i
];
452 to
->q_ibytes
[i
] += from
->q_ibytes
[i
];
453 to
->q_obytes
[i
] += from
->q_obytes
[i
];
454 to
->q_errors
[i
] += from
->q_errors
[i
];
459 failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused
,
460 enum rte_eth_event_type event __rte_unused
,
461 void *cb_arg
, void *out __rte_unused
)
463 struct sub_device
*sdev
= cb_arg
;
465 fs_lock(sdev
->fs_dev
, 0);
466 /* Switch as soon as possible tx_dev. */
467 fs_switch_dev(sdev
->fs_dev
, sdev
);
468 /* Use safe bursts in any case. */
469 set_burst_fn(sdev
->fs_dev
, 1);
471 * Async removal, the sub-PMD will try to unregister
472 * the callback at the source of the current thread context.
475 fs_unlock(sdev
->fs_dev
, 0);
480 failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused
,
481 enum rte_eth_event_type event __rte_unused
,
482 void *cb_arg
, void *out __rte_unused
)
484 struct rte_eth_dev
*dev
= cb_arg
;
487 ret
= dev
->dev_ops
->link_update(dev
, 0);
488 /* We must pass on the LSC event */
490 return _rte_eth_dev_callback_process(dev
,
491 RTE_ETH_EVENT_INTR_LSC
,
497 /* Take sub-device ownership before it becomes exposed to the application. */
499 failsafe_eth_new_event_callback(uint16_t port_id
,
500 enum rte_eth_event_type event __rte_unused
,
501 void *cb_arg
, void *out __rte_unused
)
503 struct rte_eth_dev
*fs_dev
= cb_arg
;
504 struct sub_device
*sdev
;
505 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
508 FOREACH_SUBDEV_STATE(sdev
, i
, fs_dev
, DEV_PARSED
) {
509 if (sdev
->state
>= DEV_PROBED
)
511 if (strcmp(sdev
->devargs
.name
, dev
->device
->name
) != 0)
513 rte_eth_dev_owner_set(port_id
, &PRIV(fs_dev
)->my_owner
);
514 /* The actual owner will be checked after the port probing. */