X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fefx.c;h=dee23b159df2b26deff21e9d18c70339ace15daf;hb=8fe74cf053de7ad2124a894996f84fa890a81093;hp=ab0e09bf154d1f1fe2fdab401465e31cccdb2918;hpb=5a54bd1307471c1cd0521402fe65e2057edcab2f;p=mirror_ubuntu-bionic-kernel.git

diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ab0e09bf154d..dee23b159df2 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -133,6 +133,16 @@ static int phy_flash_cfg;
 module_param(phy_flash_cfg, int, 0644);
 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 
+static unsigned irq_adapt_low_thresh = 10000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+		 "Threshold score for reducing IRQ moderation");
+
+static unsigned irq_adapt_high_thresh = 20000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+		 "Threshold score for increasing IRQ moderation");
+
 /**************************************************************************
  *
  * Utility functions and prototypes
@@ -182,7 +192,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 		channel->rx_pkt = NULL;
 	}
 
-	efx_flush_lro(channel);
 	efx_rx_strategy(channel);
 
 	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
@@ -224,12 +233,41 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	rx_packets = efx_process_channel(channel, budget);
 
 	if (rx_packets < budget) {
+		struct efx_nic *efx = channel->efx;
+
+		if (channel->used_flags & EFX_USED_BY_RX &&
+		    efx->irq_rx_adaptive &&
+		    unlikely(++channel->irq_count == 1000)) {
+			unsigned old_irq_moderation = channel->irq_moderation;
+
+			if (unlikely(channel->irq_mod_score <
+				     irq_adapt_low_thresh)) {
+				channel->irq_moderation =
+					max_t(int,
+					      channel->irq_moderation -
+					      FALCON_IRQ_MOD_RESOLUTION,
+					      FALCON_IRQ_MOD_RESOLUTION);
+			} else if (unlikely(channel->irq_mod_score >
+					    irq_adapt_high_thresh)) {
+				channel->irq_moderation =
+					min(channel->irq_moderation +
+					    FALCON_IRQ_MOD_RESOLUTION,
+					    efx->irq_rx_moderation);
+			}
+
+			if (channel->irq_moderation != old_irq_moderation)
+				falcon_set_int_moderation(channel);
+
+			channel->irq_count = 0;
+			channel->irq_mod_score = 0;
+		}
+
 		/* There is no race here; although napi_disable() will
-		 * only wait for netif_rx_complete(), this isn't a problem
+		 * only wait for napi_complete(), this isn't a problem
 		 * since efx_channel_processed() will have no effect if
 		 * interrupts have already been disabled.
 		 */
-		netif_rx_complete(napi);
+		napi_complete(napi);
 		efx_channel_processed(channel);
 	}
 
@@ -410,6 +448,9 @@ static void efx_init_channels(struct efx_nic *efx)
 
 		WARN_ON(channel->rx_pkt != NULL);
 		efx_rx_strategy(channel);
+
+		netif_napi_add(channel->napi_dev, &channel->napi_str,
+			       efx_poll, napi_weight);
 	}
 }
 
@@ -424,10 +465,6 @@ static void efx_start_channel(struct efx_channel *channel)
 
 	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
 
-	if (!(channel->efx->net_dev->flags & IFF_UP))
-		netif_napi_add(channel->napi_dev, &channel->napi_str,
-			       efx_poll, napi_weight);
-
 	/* The interrupt handler for this channel may set work_pending
 	 * as soon as we enable it. Make sure it's cleared before
 	 * then. Similarly, make sure it sees the enabled flag set. */
@@ -558,6 +595,8 @@ static void efx_link_status_changed(struct efx_nic *efx)
 
 }
 
+static void efx_fini_port(struct efx_nic *efx);
+
 /* This call reinitialises the MAC to pick up new PHY settings. The
  * caller must hold the mac_lock */
 void __efx_reconfigure_port(struct efx_nic *efx)
@@ -593,8 +632,8 @@ void __efx_reconfigure_port(struct efx_nic *efx)
 
 fail:
 	EFX_ERR(efx, "failed to reconfigure MAC\n");
-	efx->phy_op->fini(efx);
-	efx->port_initialized = false;
+	efx->port_enabled = false;
+	efx_fini_port(efx);
 }
 
 /* Reinitialise the MAC to pick up new PHY settings, even if the port is
@@ -854,20 +893,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-	cpumask_t core_mask;
+	cpumask_var_t core_mask;
 	int count;
 	int cpu;
 
-	cpus_clear(core_mask);
+	if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+		printk(KERN_WARNING
+		       "efx.c: allocation failure, irq balancing hobbled\n");
+		return 1;
+	}
+
+	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
-		if (!cpu_isset(cpu, core_mask)) {
+		if (!cpumask_test_cpu(cpu, core_mask)) {
 			++count;
-			cpus_or(core_mask, core_mask,
-				topology_core_siblings(cpu));
+			cpumask_or(core_mask, core_mask,
+				   topology_core_cpumask(cpu));
 		}
 	}
 
+	free_cpumask_var(core_mask);
 	return count;
 }
 
@@ -990,7 +1036,7 @@ static int efx_probe_nic(struct efx_nic *efx)
 	efx_set_channels(efx);
 
 	/* Initialise the interrupt moderation settings */
-	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
+	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
 	return 0;
 }
@@ -1187,7 +1233,8 @@ void efx_flush_queues(struct efx_nic *efx)
  **************************************************************************/
 
 /* Set interrupt moderation parameters */
-void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
+void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
+			     bool rx_adaptive)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
@@ -1197,6 +1244,8 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
 	efx_for_each_tx_queue(tx_queue, efx)
 		tx_queue->channel->irq_moderation = tx_usecs;
 
+	efx->irq_rx_adaptive = rx_adaptive;
+	efx->irq_rx_moderation = rx_usecs;
 	efx_for_each_rx_queue(rx_queue, efx)
 		rx_queue->channel->irq_moderation = rx_usecs;
 }
@@ -1269,18 +1318,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 static int efx_init_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
-	int rc;
 
 	efx_for_each_channel(channel, efx) {
 		channel->napi_dev = efx->net_dev;
-		rc = efx_lro_init(&channel->lro_mgr, efx);
-		if (rc)
-			goto err;
 	}
 	return 0;
- err:
-	efx_fini_napi(efx);
-	return rc;
 }
 
 static void efx_fini_napi(struct efx_nic *efx)
@@ -1288,7 +1330,6 @@ static void efx_fini_napi(struct efx_nic *efx)
 	struct efx_channel *channel;
 
 	efx_for_each_channel(channel, efx) {
-		efx_lro_fini(&channel->lro_mgr);
 		channel->napi_dev = NULL;
 	}
 }
@@ -1676,7 +1717,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method,
 			rc = efx->phy_op->init(efx);
 			if (rc)
 				ok = false;
-		} else
+		}
+		if (!ok)
 			efx->port_initialized = false;
 	}
 
@@ -1857,8 +1899,8 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
 
 static struct efx_board efx_dummy_board_info = {
 	.init		= efx_port_dummy_op_int,
-	.init_leds	= efx_port_dummy_op_int,
-	.set_fault_led	= efx_port_dummy_op_blink,
+	.init_leds	= efx_port_dummy_op_void,
+	.set_id_led	= efx_port_dummy_op_blink,
 	.monitor	= efx_port_dummy_op_int,
 	.blink		= efx_port_dummy_op_blink,
 	.fini		= efx_port_dummy_op_void,
@@ -2120,7 +2162,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO);
 	if (lro)
-		net_dev->features |= NETIF_F_LRO;
+		net_dev->features |= NETIF_F_GRO;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
 				   NETIF_F_HIGHDMA | NETIF_F_TSO);
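
Note (illustration only, not part of the patch): the efx_poll() hunk above is the adaptive IRQ moderation that the new irq_adapt_low_thresh / irq_adapt_high_thresh module parameters tune. For an RX channel with efx->irq_rx_adaptive set, every 1000 interrupts the accumulated irq_mod_score is compared against the two thresholds and the channel's irq_moderation is stepped down or up by one FALCON_IRQ_MOD_RESOLUTION, clamped between a single resolution step and efx->irq_rx_moderation; the driver accumulates irq_mod_score in event-processing paths that are outside this diff. The user-space sketch below reproduces only that decision logic. IRQ_WINDOW, LOW_THRESH and HIGH_THRESH take the defaults visible in the patch, while struct chan, channel_irq(), RESOLUTION and IRQ_MOD_MAX are made-up stand-ins for the driver's efx_channel fields, FALCON_IRQ_MOD_RESOLUTION and efx->irq_rx_moderation, whose real definitions are not part of this diff.

/*
 * Stand-alone sketch of the adaptive IRQ moderation step added to
 * efx_poll() above.  struct chan, channel_irq() and the RESOLUTION /
 * IRQ_MOD_MAX values are made up for illustration.
 */
#include <stdio.h>

#define IRQ_WINDOW	1000	/* re-evaluate every 1000 interrupts, as in the patch */
#define LOW_THRESH	10000	/* irq_adapt_low_thresh default */
#define HIGH_THRESH	20000	/* irq_adapt_high_thresh default */
#define RESOLUTION	5	/* placeholder for FALCON_IRQ_MOD_RESOLUTION */
#define IRQ_MOD_MAX	60	/* placeholder for efx->irq_rx_moderation */

struct chan {
	unsigned irq_count;	/* interrupts since the last adjustment */
	unsigned irq_mod_score;	/* work observed since the last adjustment */
	int irq_moderation;	/* current moderation setting */
};

/* Called once per interrupt with the amount of work that interrupt found;
 * the real driver accumulates irq_mod_score in its event-handling paths. */
static void channel_irq(struct chan *c, unsigned work)
{
	c->irq_mod_score += work;
	if (++c->irq_count < IRQ_WINDOW)
		return;

	if (c->irq_mod_score < LOW_THRESH) {
		/* Little work per window: back the moderation off for lower
		 * latency, but never below one resolution step. */
		c->irq_moderation -= RESOLUTION;
		if (c->irq_moderation < RESOLUTION)
			c->irq_moderation = RESOLUTION;
	} else if (c->irq_mod_score > HIGH_THRESH) {
		/* Heavy work per window: raise the moderation to cut the
		 * interrupt rate, capped at the configured maximum. */
		c->irq_moderation += RESOLUTION;
		if (c->irq_moderation > IRQ_MOD_MAX)
			c->irq_moderation = IRQ_MOD_MAX;
	}

	/* Start a fresh measurement window either way. */
	c->irq_count = 0;
	c->irq_mod_score = 0;
}

int main(void)
{
	struct chan c = { .irq_moderation = 30 };
	int i;

	/* Busy period: 25 units of work per IRQ -> 25000 per window > HIGH_THRESH. */
	for (i = 0; i < 3000; i++)
		channel_irq(&c, 25);
	printf("after busy period:  moderation = %d\n", c.irq_moderation);

	/* Quiet period: 5 units per IRQ -> 5000 per window < LOW_THRESH. */
	for (i = 0; i < 3000; i++)
		channel_irq(&c, 5);
	printf("after quiet period: moderation = %d\n", c.irq_moderation);
	return 0;
}

Built with any C compiler, the busy loop drives the moderation up toward the cap and the quiet loop walks it back down toward the floor, which is the behaviour the patch is after: fewer interrupts under sustained load, lower latency when traffic is light. The gap between the two thresholds acts as a dead band, so borderline scores leave the setting untouched instead of making it oscillate.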