struct ibmvnic_adapter *adapter = netdev_priv(netdev);
/* ensure that transmissions are stopped if called by do_reset */
- if (adapter->resetting)
+ if (test_bit(0, &adapter->resetting))
netif_tx_disable(netdev);
else
netif_tx_stop_all_queues(netdev);
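
The hunk above, in ibmvnic_cleanup(), is representative of the mechanical part of this patch: every read of the old bool adapter->resetting becomes test_bit() on bit 0, so the flag can later be claimed atomically. This implies a companion change in ibmvnic.h (not shown in this excerpt) turning the field into an unsigned long, since the atomic bitops operate on unsigned long words; a reconstruction of that header change follows the __ibmvnic_reset() hunk below.
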
u8 proto = 0;
netdev_tx_t ret = NETDEV_TX_OK;
- if (adapter->resetting) {
+ if (test_bit(0, &adapter->resetting)) {
if (!netif_subqueue_stopped(netdev, skb))
netif_stop_subqueue(netdev, queue_num);
dev_kfree_skb_any(skb);
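
The same conversion in the transmit path (ibmvnic_xmit()): while a reset is in flight the subqueue is stopped and the skb is freed with dev_kfree_skb_any(); the remainder of the early-exit path is trimmed from this excerpt.
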
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
+ return;
+ }
+
reset_state = adapter->state;
rwi = get_next_rwi(adapter);
break;
rwi = get_next_rwi(adapter);
+
+ if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
+ rwi->reset_reason == VNIC_RESET_MOBILITY))
+ adapter->force_reset_recovery = true;
}
if (adapter->wait_for_reset) {
free_all_rwi(adapter);
}
- adapter->resetting = false;
+ clear_bit_unlock(0, &adapter->resetting);
+}
+
+static void __ibmvnic_delayed_reset(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = container_of(work, struct ibmvnic_adapter,
+ ibmvnic_delayed_reset.work);
+ __ibmvnic_reset(&adapter->ibmvnic_reset);
}
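
This is the heart of the change. __ibmvnic_reset() now tries to claim bit 0 of adapter->resetting with test_and_set_bit_lock(); if the bit is already held, another reset is running, so the work re-queues itself as delayed work and returns instead of racing the owner. test_and_set_bit_lock() and clear_bit_unlock() form an acquire/release pair, so the next claimant sees all stores made by the previous one. While draining the rwi list, a queued FAILOVER or MOBILITY reset now forces full hardware recovery, and the new __ibmvnic_delayed_reset() handler simply re-enters __ibmvnic_reset() when the back-off timer fires.

For this hunk to build, ibmvnic.h needs matching changes that the excerpt does not show. A sketch of what they presumably look like; the delay value is the one used upstream, but treat the exact number as an assumption:

    /* drivers/net/ethernet/ibm/ibmvnic.h (reconstructed, not part of this diff) */
    #define IBMVNIC_RESET_DELAY 100                      /* assumed: back-off in jiffies */

    struct ibmvnic_adapter {
            ...
            struct work_struct ibmvnic_reset;            /* existing reset worker */
            struct delayed_work ibmvnic_delayed_reset;   /* new: deferred re-queue */
            unsigned long resetting;                     /* was bool; bit 0 is the lock */
            ...
    };
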
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
- adapter->resetting = true;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
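
With the worker claiming the bit itself, the direct write adapter->resetting = true in ibmvnic_reset() is deleted rather than converted; setting the flag here would make the test_and_set_bit_lock() check in __ibmvnic_reset() always defer. Queuing the rwi under rwi_lock and scheduling the work are unchanged.
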
u16 offset;
u8 flags = 0;
- if (unlikely(adapter->resetting &&
+ if (unlikely(test_bit(0, &adapter->resetting) &&
adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
napi_complete_done(napi, frames_processed);
return frames_processed;
}
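
In the NAPI poll loop, a pending reset (other than NON_FATAL) makes the driver bail out early: the queue interrupt is re-enabled, napi_complete_done() is called with the frames handled so far, and that same count is returned so the reset path can quiesce polling cleanly.
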
- if (adapter->resetting &&
+ if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
u64 val = (0xff000000) | scrq->hw_irq;
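
enable_scrq_irq() gets the same test_bit() treatment for the mobility case; the hunk is cut short right after val is built. For context, in the upstream driver that value feeds an H_EOI hypervisor call that retires a possibly stale interrupt left over from the partition migration, roughly (reconstructed, not part of this diff):

    u64 val = (0xff000000) | scrq->hw_irq;
    rc = plpar_hcall_norets(H_EOI, val);
    if (rc)
            dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
                    scrq->hw_irq, rc);
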
if (rc) {
if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
- if (adapter->resetting)
+ if (test_bit(0, &adapter->resetting))
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}
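
In ibmvnic_send_crq(), an H_CLOSED return from the hypervisor means the partner side of the CRQ is down; the converted check escalates to a FATAL reset only while a reset is already in flight.
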
case IBMVNIC_CRQ_XPORT_EVENT:
netif_carrier_off(netdev);
adapter->crq.active = false;
- if (adapter->resetting)
+ if (test_bit(0, &adapter->resetting))
adapter->force_reset_recovery = true;
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Migrated, re-enabling adapter\n");
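
A transport event in ibmvnic_handle_crq() takes the carrier down and marks the CRQ inactive; if it arrives while a reset is running, force_reset_recovery is set so that reset does a full CRQ re-initialization, matching the FAILOVER/MOBILITY handling added to the __ibmvnic_reset() loop earlier. The partition-migrated sub-case then logs the event and re-enables the adapter.
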
return -1;
}
- if (adapter->resetting && !adapter->wait_for_reset &&
+ if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
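
The next read-side conversion covers device (re)initialization, likely ibmvnic_reset_init(): when a reset that is neither wait_for_reset-driven nor a MOBILITY event finds that the negotiated RX/TX queue counts have changed, the sub-CRQ resources must be torn down and reallocated (the body of that branch is trimmed from this excerpt).
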
spin_lock_init(&adapter->stats_lock);
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+ INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
+ __ibmvnic_delayed_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
init_completion(&adapter->init_done);
- adapter->resetting = false;
+ clear_bit(0, &adapter->resetting);
do {
rc = init_crq_queue(adapter);
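
Finally, ibmvnic_probe() wires the new delayed work up next to the existing reset work and initializes the flag with a plain clear_bit(). The unlock-flavoured clear_bit_unlock() is not needed here: nothing can contend for the bit before the adapter is registered, so no release ordering is required.
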