EXPORT_SYMBOL(netdev_txq_to_tc);
#ifdef CONFIG_XPS
+struct static_key xps_needed __read_mostly;
+EXPORT_SYMBOL(xps_needed);
+struct static_key xps_rxqs_needed __read_mostly;
+EXPORT_SYMBOL(xps_rxqs_needed);
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
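
For context: struct static_key is the kernel's jump-label primitive from <linux/jump_label.h>. While a key is disabled, testing it compiles down to a patched no-op/jump rather than a load plus conditional branch, which is why the patch can afford to check it on the transmit fast path. A minimal sketch of the primitive, with a hypothetical key and function that are not part of the patch:

#include <linux/cache.h>
#include <linux/jump_label.h>

/* Hypothetical key for illustration; like xps_needed above, a
 * zero-initialized struct static_key starts out disabled.
 */
static struct static_key example_key __read_mostly = STATIC_KEY_INIT_FALSE;

static bool example_feature_active(void)
{
	/* Compiles to a patched jump: the branch body is skipped
	 * until someone calls static_key_slow_inc(&example_key).
	 */
	if (static_key_false(&example_key))
		return true;
	return false;
}
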
@@ ... @@ static void netif_reset_xps_queues(...)
	struct xps_dev_maps *dev_maps;
	unsigned int nr_ids;

-	mutex_lock(&xps_map_mutex);
+	if (!static_key_false(&xps_needed))
+		return;

-	dev_maps = xmap_dereference(dev->xps_rxqs_map);
-	if (dev_maps) {
-		nr_ids = dev->num_rx_queues;
-		clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset,
-			       count, true);
+	mutex_lock(&xps_map_mutex);

+	if (static_key_false(&xps_rxqs_needed)) {
+		dev_maps = xmap_dereference(dev->xps_rxqs_map);
+		if (dev_maps) {
+			nr_ids = dev->num_rx_queues;
+			clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
+				       offset, count, true);
+		}
	}
	dev_maps = xmap_dereference(dev->xps_cpus_map);

@@ ... @@ static void netif_reset_xps_queues(...)
		       false);

out_no_maps:
+	if (static_key_enabled(&xps_rxqs_needed))
+		static_key_slow_dec(&xps_rxqs_needed);
+
+	static_key_slow_dec(&xps_needed);
	mutex_unlock(&xps_map_mutex);
}
@@ ... @@ int __netif_set_xps_queue(...)
	if (!new_dev_maps)
		goto out_no_new_maps;

+	static_key_slow_inc(&xps_needed);
+	if (is_rxqs_map)
+		static_key_slow_inc(&xps_rxqs_needed);
+
	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
	     j < nr_ids;) {
		/* copy maps belonging to foreign traffic classes */
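
The two hunks above form the lifecycle of the keys: every successful map install in __netif_set_xps_queue() takes a reference with static_key_slow_inc(), and the teardown path in netif_reset_xps_queues() drops it again once the maps are cleaned. A rough sketch of that pairing, using hypothetical helper names in place of the real configure/reset paths:

#include <linux/jump_label.h>

static struct static_key feature_needed __read_mostly;
static struct static_key feature_rxqs_needed __read_mostly;

/* Install side (cf. __netif_set_xps_queue()): take a reference so
 * fast-path checks of the key start returning true.
 */
static void example_install_map(bool is_rxqs_map)
{
	static_key_slow_inc(&feature_needed);
	if (is_rxqs_map)
		static_key_slow_inc(&feature_rxqs_needed);
}

/* Teardown side (cf. netif_reset_xps_queues()): nothing to do if no
 * map was ever installed; otherwise drop the references taken above.
 */
static void example_remove_maps(void)
{
	if (!static_key_false(&feature_needed))
		return;

	if (static_key_enabled(&feature_rxqs_needed))
		static_key_slow_dec(&feature_rxqs_needed);

	static_key_slow_dec(&feature_needed);
}
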
@@ ... @@ static inline int get_xps_queue(...)
	struct xps_map *map;
	int queue_index = -1;

+	if (!static_key_false(&xps_needed))
+		return -1;
+
	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_cpus_map);
	if (dev_maps) {
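
The final hunk is where the optimization pays off: get_xps_queue() now returns early, before rcu_read_lock() and the map lookups, whenever no XPS map has been configured anywhere. A sketch of the same gate in isolation; the names are hypothetical, and returning -1 mirrors the real function's "no decision" result so the caller can fall back to its ordinary queue selection:

#include <linux/jump_label.h>
#include <linux/rcupdate.h>

static struct static_key feature_needed __read_mostly;
static void __rcu *example_map;	/* stands in for dev->xps_cpus_map */

static int example_pick_queue(void)
{
	int queue_index = -1;
	void *map;

	/* Patched-out branch: skip the RCU lookup entirely unless at
	 * least one map has been installed (key reference count > 0).
	 */
	if (!static_key_false(&feature_needed))
		return -1;

	rcu_read_lock();
	map = rcu_dereference(example_map);
	if (map) {
		/* ... walk the map and set queue_index ... */
	}
	rcu_read_unlock();

	return queue_index;
}
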