Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Sep 2013 16:36:28 +0000 (09:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Sep 2013 16:36:28 +0000 (09:36 -0700)
Pull trivial tree from Jiri Kosina:
 "The usual trivial updates all over the tree -- mostly typo fixes and
  documentation updates"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (52 commits)
  doc: Documentation/cputopology.txt fix typo
  treewide: Convert retrun typos to return
  Fix comment typo for init_cma_reserved_pageblock
  Documentation/trace: Correcting and extending tracepoint documentation
  mm/hotplug: fix a typo in Documentation/memory-hotplug.txt
  power: Documentation: Update s2ram link
  doc: fix a typo in Documentation/00-INDEX
  Documentation/printk-formats.txt: No casts needed for u64/s64
  doc: Fix typo "is is" in Documentations
  treewide: Fix printks with 0x%#
  zram: doc fixes
  Documentation/kmemcheck: update kmemcheck documentation
  doc: documentation/hwspinlock.txt fix typo
  PM / Hibernate: add section for resume options
  doc: filesystems : Fix typo in Documentations/filesystems
  scsi/megaraid fixed several typos in comments
  ppc: init_32: Fix error typo "CONFIG_START_KERNEL"
  treewide: Add __GFP_NOWARN to k.alloc calls with v.alloc fallbacks
  page_isolation: Fix a comment typo in test_pages_isolated()
  doc: fix a typo about irq affinity
  ...

20 files changed:
CREDITS
Documentation/acpi/enumeration.txt
Documentation/filesystems/ext4.txt
Documentation/memory-hotplug.txt
Documentation/printk-formats.txt
arch/parisc/kernel/signal.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/sis/sis900.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/spi/spi-bitbang.c
fs/ext4/super.c
kernel/workqueue.c
mm/memory-failure.c

diff --combined CREDITS
index 646a0a9ad6d1a94c505148399d7cb89e3c7cc3f3,33a2f2d8300959dee1f3549156c5ef8ac068fb48..9416a9a8b95e6c4404c80ade376feff7b6df406a
+++ b/CREDITS
@@@ -637,14 -637,13 +637,13 @@@ S: 14509 NE 39th Street #109
  S: Bellevue, Washington 98007
  S: USA
  
- N: Christopher L. Cheney
- E: ccheney@debian.org
- E: ccheney@cheney.cx
- W: http://www.cheney.cx
+ N: Chris Cheney
+ E: chris.cheney@gmail.com
+ E: ccheney@redhat.com
  P: 1024D/8E384AF2 2D31 1927 87D7 1F24 9FF9  1BC5 D106 5AB3 8E38 4AF2
  D: Vista Imaging usb webcam driver
- S: 314 Prince of Wales
- S: Conroe, TX 77304
+ S: 2308 Therrell Way
+ S: McKinney, TX 75070
  S: USA
  
  N: Stuart Cheshire
@@@ -1120,7 -1119,6 +1119,7 @@@ D: author of userfs filesyste
  D: Improved mmap and munmap handling
  D: General mm minor tidyups
  D: autofs v4 maintainer
 +D: Xen subsystem
  S: 987 Alabama St
  S: San Francisco
  S: CA, 94110
index d977778b5e67b78d521fd59707d0ddcd05a3ffc3,64139a189a4c4f79bd1a43f3d9a5671bd40b0681..aca4e69121b7a2bb8ab38d2002066bcab49af915
@@@ -207,7 -207,7 +207,7 @@@ passing those. One idea is to return th
                        Return (Local0)
                }
  
- Then the at25 SPI driver can get this configation by calling _DSM on its
+ Then the at25 SPI driver can get this configuration by calling _DSM on its
  ACPI handle like:
  
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
  I2C serial bus support
  ~~~~~~~~~~~~~~~~~~~~~~
  The slaves behind the I2C bus controller only need to add the ACPI IDs like
 -with the platform and SPI drivers. However the I2C bus controller driver
 -needs to call acpi_i2c_register_devices() after it has added the adapter.
 -
 -An I2C bus (controller) driver does:
 -
 -      ...
 -      ret = i2c_add_numbered_adapter(adapter);
 -      if (ret)
 -              /* handle error */
 -
 -      of_i2c_register_devices(adapter);
 -      /* Enumerate the slave devices behind this bus via ACPI */
 -      acpi_i2c_register_devices(adapter);
 +with the platform and SPI drivers. The I2C core automatically enumerates
 +any slave devices behind the controller device once the adapter is
 +registered.
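
As an illustration of the new behavior described above (a minimal sketch, not part of this patch), a bus driver now only registers its adapter and the ACPI slaves appear by themselves:

        ret = i2c_add_numbered_adapter(adapter);
        if (ret)
                return ret;     /* handle error */

        /* No acpi_i2c_register_devices() call is needed any more: the
         * I2C core enumerates ACPI slave devices behind this adapter
         * automatically at registration time. */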
  
  Below is an example of how to add ACPI support to the existing mpu3050
  input driver:
index b91cfaaf6a0f459ef14a27098e993a2f6131fd34,a92c5aa8ce22ef11e48d1192d2f063f674fc65a4..919a3293aaa4249f81f0d907d9867d6d80f9a135
@@@ -2,7 -2,7 +2,7 @@@
  Ext4 Filesystem
  ===============
  
- Ext4 is an an advanced level of the ext3 filesystem which incorporates
+ Ext4 is an advanced level of the ext3 filesystem which incorporates
  scalability and reliability enhancements for supporting large filesystems
  (64 bit) in keeping with increasing disk capacities and state-of-the-art
  feature requirements.
@@@ -144,12 -144,11 +144,12 @@@ journal_async_commit    Commit block can b
                        mount the device. This will enable 'journal_checksum'
                        internally.
  
 +journal_path=path
  journal_dev=devnum    When the external journal device's major/minor numbers
 -                      have changed, this option allows the user to specify
 +                      have changed, these options allow the user to specify
                        the new journal location.  The journal device is
 -                      identified through its new major/minor numbers encoded
 -                      in devnum.
 +                      identified through either its new major/minor numbers
 +                      encoded in devnum, or via a path to the device.
  
  norecovery            Don't load the journal on mounting.  Note that
  noload                        if the filesystem was not unmounted cleanly,
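
As a rough usage illustration of the journal options above (device names and numbers are hypothetical), either form can be passed at mount time, here via mount(2) from user space:

        #include <stdio.h>
        #include <sys/mount.h>

        int main(void)
        {
                /* Identify the relocated external journal by path; the
                 * equivalent "journal_dev=2065" (e.g. major 8, minor 17:
                 * 8*256+17) would select it by device number instead. */
                if (mount("/dev/sda1", "/mnt", "ext4", 0,
                          "journal_path=/dev/sdb1"))
                        perror("mount");
                return 0;
        }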
index 8fd254c735897af37c4544e5a6ecc514819f1760,d7a9b0a90d455492dbd16d56a13c860e9ea9e9f0..58340d50f8a614ac577bd5b36f60d31d170f9ea9
@@@ -163,7 -163,7 +163,7 @@@ a recent addition and not present on ol
                      at read:  contains online/offline state of memory.
                      at write: user can specify "online_kernel",
                      "online_movable", "online", "offline" command
-                     which will be performed on al sections in the block.
+                     which will be performed on all sections in the block.
  'phys_device'     : read-only: designed to show the name of physical memory
                      device.  This is not well implemented now.
  'removable'       : read-only: contains an integer value indicating
@@@ -210,15 -210,13 +210,15 @@@ If memory device is found, memory hotpl
  
  4.2 Notify memory hot-add event by hand
  ------------
 -In some environments, especially virtualized environment, firmware will not
 -notify memory hotplug event to the kernel. For such environment, "probe"
 -interface is supported. This interface depends on CONFIG_ARCH_MEMORY_PROBE.
 -
 -Now, CONFIG_ARCH_MEMORY_PROBE is supported only by powerpc but it does not
 -contain highly architecture codes. Please add config if you need "probe"
 -interface.
 +On powerpc, the firmware does not notify the kernel of memory hotplug events.
 +Therefore, a "probe" interface is provided to report such events to the
 +kernel by hand. This interface depends on CONFIG_ARCH_MEMORY_PROBE.
 +
 +CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config
 +option is disabled by default since ACPI notifies the kernel of memory
 +hotplug events and the kernel then performs the hotplug operation itself.
 +Enable this option if you need the "probe" interface for testing purposes
 +on x86.
  
  Probe interface is located at
  /sys/devices/system/memory/probe
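
As a rough usage sketch (the physical address is hypothetical and must be the section-aligned start of the new memory on the machine):

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/devices/system/memory/probe", "w");

                if (!f) {
                        perror("probe");
                        return 1;
                }
                /* start physical address of the memory to add */
                fprintf(f, "0x100000000\n");
                return fclose(f) ? 1 : 0;
        }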
index 9552a3299ec9a2e27e88db91482b41c30c9de24e,44fc924ad008b5a5db9ce39376415b72524ca098..445ad743ec814ee4571c175884d3ccce4ffda11a
@@@ -97,7 -97,7 +97,7 @@@ IPv4 addresses
  
        %pI4    1.2.3.4
        %pi4    001.002.003.004
-       %p[Ii][hnbl]
+       %p[Ii]4[hnbl]
  
        For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
        specifiers result in a printed address with ('i4') or without ('I4')
@@@ -168,15 -168,6 +168,15 @@@ UUID/GUID addresses
        Where no additional specifiers are used the default little endian
        order with lower case hex characters will be printed.
  
 +dentry names:
 +      %pd{,2,3,4}
 +      %pD{,2,3,4}
 +
 +      For printing dentry name; if we race with d_move(), the name might be
 +      a mix of old and new ones, but it won't oops.  %pd dentry is a safer
 +      equivalent of %s dentry->d_name.name we used to use, %pd<n> prints
 +      n last components.  %pD does the same thing for struct file.
 +
  struct va_format:
  
        %pV
  
  u64 SHOULD be printed with %llu/%llx, (unsigned long long):
  
-       printk("%llu", (unsigned long long)u64_var);
+       printk("%llu", u64_var);
  
  s64 SHOULD be printed with %lld/%llx, (long long):
  
-       printk("%lld", (long long)s64_var);
+       printk("%lld", s64_var);
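
A hedged illustration (not part of this commit) combining the simplified u64/s64 calls with the IPv4 specifier documented above:

        #include <linux/kernel.h>
        #include <linux/types.h>

        static void show(u64 bytes, s64 delta, const __be32 *addr)
        {
                /* u64/s64 are unsigned long long/long long on all
                 * architectures, so no casts are needed */
                printk(KERN_INFO "bytes=%llu delta=%lld\n", bytes, delta);
                /* %pI4 prints dotted-decimal, e.g. 1.2.3.4 */
                printk(KERN_INFO "peer=%pI4\n", addr);
        }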
  
  If <type> is dependent on a config option for its size (e.g., sector_t,
  blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
index 07349b00268750581d1badb79b2c2487732831b8,35c5bf1307aa573ef20e324c534e9739dddaaf2d..1cba8f29bb492326b156423f8dd12805221dbfc5
   * this. */
  #define A(__x)        ((unsigned long)(__x))
  
 -/*
 - * Atomically swap in the new signal mask, and wait for a signal.
 - */
 -#ifdef CONFIG_64BIT
 -#include "sys32.h"
 -#endif
 -
  /*
   * Do a signal return - restore sigcontext.
   */
@@@ -78,7 -85,7 +78,7 @@@ restore_sigcontext(struct sigcontext __
        err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
        err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
        err |= __get_user(regs->sar, &sc->sc_sar);
-       DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n", 
+       DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n",
                        regs->iaoq[0],regs->iaoq[1]);
        DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
        return err;
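
The treewide "0x%#" cleanup shown here exists because the printf '#' flag already emits the 0x prefix, so spelling both duplicates it. A small user-space demonstration:

        #include <stdio.h>

        int main(void)
        {
                unsigned long iaoq = 0x1234;

                printf("0x%#lx\n", iaoq); /* "0x0x1234" - prefix doubled */
                printf("%#lx\n", iaoq);   /* "0x1234" as intended */
                return 0;
        }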
index 79ac77cf62d90665dd05d180570a3a30bb38b717,5d5f2685ee2b3b581e5ec49ac7db00f6cef5230e..0d0665ca6f1914f77fa3aa23b68254d42fcd0c86
@@@ -60,7 -60,6 +60,7 @@@
  #include <linux/workqueue.h>
  #include <net/neighbour.h>
  #include <net/netevent.h>
 +#include <net/addrconf.h>
  #include <asm/uaccess.h>
  
  #include "cxgb4.h"
  #include "t4fw_api.h"
  #include "l2t.h"
  
 +#include <../drivers/net/bonding/bonding.h>
 +
 +#ifdef DRV_VERSION
 +#undef DRV_VERSION
 +#endif
  #define DRV_VERSION "2.0.0-ko"
  #define DRV_DESC "Chelsio T4/T5 Network Driver"
  
@@@ -406,9 -400,6 +406,9 @@@ static struct dentry *cxgb4_debugfs_roo
  
  static LIST_HEAD(adapter_list);
  static DEFINE_MUTEX(uld_mutex);
 +/* Adapter list to be accessed from atomic context */
 +static LIST_HEAD(adap_rcu_list);
 +static DEFINE_SPINLOCK(adap_rcu_lock);
  static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
  static const char *uld_str[] = { "RDMA", "iSCSI" };
  
@@@ -1142,7 -1133,7 +1142,7 @@@ out:    release_firmware(fw)
   */
  void *t4_alloc_mem(size_t size)
  {
-       void *p = kzalloc(size, GFP_KERNEL);
+       void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
  
        if (!p)
                p = vzalloc(size);
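
This hunk is one instance of the treewide "__GFP_NOWARN for k.alloc calls with v.alloc fallbacks" change in this pull: since vzalloc() provides a fallback, the kmalloc failure warning is only noise. The matching free side of this pattern (a sketch of the usual idiom, not shown in this excerpt) must cope with either allocator:

        #include <linux/mm.h>
        #include <linux/slab.h>
        #include <linux/vmalloc.h>

        static void free_mem_sketch(void *addr)
        {
                if (is_vmalloc_addr(addr))
                        vfree(addr);
                else
                        kfree(addr);
        }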
@@@ -3236,38 -3227,6 +3236,38 @@@ static int tid_init(struct tid_info *t
        return 0;
  }
  
 +static int cxgb4_clip_get(const struct net_device *dev,
 +                        const struct in6_addr *lip)
 +{
 +      struct adapter *adap;
 +      struct fw_clip_cmd c;
 +
 +      adap = netdev2adap(dev);
 +      memset(&c, 0, sizeof(c));
 +      c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
 +                      FW_CMD_REQUEST | FW_CMD_WRITE);
 +      c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
 +      *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
 +      *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
 +      return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 +}
 +
 +static int cxgb4_clip_release(const struct net_device *dev,
 +                            const struct in6_addr *lip)
 +{
 +      struct adapter *adap;
 +      struct fw_clip_cmd c;
 +
 +      adap = netdev2adap(dev);
 +      memset(&c, 0, sizeof(c));
 +      c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
 +                      FW_CMD_REQUEST | FW_CMD_READ);
 +      c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
 +      *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
 +      *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
 +      return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 +}
 +
  /**
   *    cxgb4_create_server - create an IP server
   *    @dev: the device
@@@ -3287,7 -3246,6 +3287,7 @@@ int cxgb4_create_server(const struct ne
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req *req;
 +      int ret;
  
        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
        req->opt0 = cpu_to_be64(TX_CHAN(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
 -      return t4_mgmt_tx(adap, skb);
 +      ret = t4_mgmt_tx(adap, skb);
 +      return net_xmit_eval(ret);
  }
  EXPORT_SYMBOL(cxgb4_create_server);
  
 +/*    cxgb4_create_server6 - create an IPv6 server
 + *    @dev: the device
 + *    @stid: the server TID
 + *    @sip: local IPv6 address to bind server to
 + *    @sport: the server's TCP port
 + *    @queue: queue to direct messages from this server to
 + *
 + *    Create an IPv6 server for the given port and address.
 + *    Returns <0 on error and one of the %NET_XMIT_* values on success.
 + */
 +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
 +                       const struct in6_addr *sip, __be16 sport,
 +                       unsigned int queue)
 +{
 +      unsigned int chan;
 +      struct sk_buff *skb;
 +      struct adapter *adap;
 +      struct cpl_pass_open_req6 *req;
 +      int ret;
 +
 +      skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 +      if (!skb)
 +              return -ENOMEM;
 +
 +      adap = netdev2adap(dev);
 +      req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
 +      INIT_TP_WR(req, 0);
 +      OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
 +      req->local_port = sport;
 +      req->peer_port = htons(0);
 +      req->local_ip_hi = *(__be64 *)(sip->s6_addr);
 +      req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
 +      req->peer_ip_hi = cpu_to_be64(0);
 +      req->peer_ip_lo = cpu_to_be64(0);
 +      chan = rxq_to_chan(&adap->sge, queue);
 +      req->opt0 = cpu_to_be64(TX_CHAN(chan));
 +      req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
 +                              SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
 +      ret = t4_mgmt_tx(adap, skb);
 +      return net_xmit_eval(ret);
 +}
 +EXPORT_SYMBOL(cxgb4_create_server6);
 +
 +int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
 +                      unsigned int queue, bool ipv6)
 +{
 +      struct sk_buff *skb;
 +      struct adapter *adap;
 +      struct cpl_close_listsvr_req *req;
 +      int ret;
 +
 +      adap = netdev2adap(dev);
 +
 +      skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 +      if (!skb)
 +              return -ENOMEM;
 +
 +      req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
 +      INIT_TP_WR(req, 0);
 +      OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
 +      req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
 +                              LISTSVR_IPV6(0)) | QUEUENO(queue));
 +      ret = t4_mgmt_tx(adap, skb);
 +      return net_xmit_eval(ret);
 +}
 +EXPORT_SYMBOL(cxgb4_remove_server);
 +
  /**
   *    cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
   *    @mtus: the HW MTU table
@@@ -3831,10 -3721,6 +3831,10 @@@ static void attach_ulds(struct adapter 
  {
        unsigned int i;
  
 +      spin_lock(&adap_rcu_lock);
 +      list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
 +      spin_unlock(&adap_rcu_lock);
 +
        mutex_lock(&uld_mutex);
        list_add_tail(&adap->list_node, &adapter_list);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
@@@ -3860,10 -3746,6 +3860,10 @@@ static void detach_ulds(struct adapter 
                netevent_registered = false;
        }
        mutex_unlock(&uld_mutex);
 +
 +      spin_lock(&adap_rcu_lock);
 +      list_del_rcu(&adap->rcu_node);
 +      spin_unlock(&adap_rcu_lock);
  }
  
  static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@@ -3927,168 -3809,6 +3927,168 @@@ int cxgb4_unregister_uld(enum cxgb4_ul
  }
  EXPORT_SYMBOL(cxgb4_unregister_uld);
  
 +/* Check whether the netdev on which the event occurred belongs to us.
 + * Return success (1) if it does, failure (0) otherwise.
 + */
 +static int cxgb4_netdev(struct net_device *netdev)
 +{
 +      struct adapter *adap;
 +      int i;
 +
 +      spin_lock(&adap_rcu_lock);
 +      list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
 +              for (i = 0; i < MAX_NPORTS; i++)
 +                      if (adap->port[i] == netdev) {
 +                              spin_unlock(&adap_rcu_lock);
 +                              return 1;
 +                      }
 +      spin_unlock(&adap_rcu_lock);
 +      return 0;
 +}
 +
 +static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
 +                  unsigned long event)
 +{
 +      int ret = NOTIFY_DONE;
 +
 +      rcu_read_lock();
 +      if (cxgb4_netdev(event_dev)) {
 +              switch (event) {
 +              case NETDEV_UP:
 +                      ret = cxgb4_clip_get(event_dev,
 +                              (const struct in6_addr *)ifa->addr.s6_addr);
 +                      if (ret < 0) {
 +                              rcu_read_unlock();
 +                              return ret;
 +                      }
 +                      ret = NOTIFY_OK;
 +                      break;
 +              case NETDEV_DOWN:
 +                      cxgb4_clip_release(event_dev,
 +                              (const struct in6_addr *)ifa->addr.s6_addr);
 +                      ret = NOTIFY_OK;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +      rcu_read_unlock();
 +      return ret;
 +}
 +
 +static int cxgb4_inet6addr_handler(struct notifier_block *this,
 +              unsigned long event, void *data)
 +{
 +      struct inet6_ifaddr *ifa = data;
 +      struct net_device *event_dev;
 +      int ret = NOTIFY_DONE;
 +      struct bonding *bond = netdev_priv(ifa->idev->dev);
 +      struct slave *slave;
 +      struct pci_dev *first_pdev = NULL;
 +
 +      if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
 +              event_dev = vlan_dev_real_dev(ifa->idev->dev);
 +              ret = clip_add(event_dev, ifa, event);
 +      } else if (ifa->idev->dev->flags & IFF_MASTER) {
 +              /* It is possible that two different adapters are bonded in one
 +               * bond. We need to find such different adapters and add clip
 +               * in all of them only once.
 +               */
 +              read_lock(&bond->lock);
 +              bond_for_each_slave(bond, slave) {
 +                      if (!first_pdev) {
 +                              ret = clip_add(slave->dev, ifa, event);
 +                              /* Initialize first_pdev only if clip_add
 +                               * succeeded, since that means it is our device
 +                               */
 +                              if (ret == NOTIFY_OK)
 +                                      first_pdev = to_pci_dev(
 +                                                      slave->dev->dev.parent);
 +                      } else if (first_pdev !=
 +                                 to_pci_dev(slave->dev->dev.parent))
 +                                      ret = clip_add(slave->dev, ifa, event);
 +              }
 +              read_unlock(&bond->lock);
 +      } else
 +              ret = clip_add(ifa->idev->dev, ifa, event);
 +
 +      return ret;
 +}
 +
 +static struct notifier_block cxgb4_inet6addr_notifier = {
 +      .notifier_call = cxgb4_inet6addr_handler
 +};
 +
 +/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 + * a physical device.
 + * The physical device reference is needed to send the actual CLIP command.
 + */
 +static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
 +{
 +      struct inet6_dev *idev = NULL;
 +      struct inet6_ifaddr *ifa;
 +      int ret = 0;
 +
 +      idev = __in6_dev_get(root_dev);
 +      if (!idev)
 +              return ret;
 +
 +      read_lock_bh(&idev->lock);
 +      list_for_each_entry(ifa, &idev->addr_list, if_list) {
 +              ret = cxgb4_clip_get(dev,
 +                              (const struct in6_addr *)ifa->addr.s6_addr);
 +              if (ret < 0)
 +                      break;
 +      }
 +      read_unlock_bh(&idev->lock);
 +
 +      return ret;
 +}
 +
 +static int update_root_dev_clip(struct net_device *dev)
 +{
 +      struct net_device *root_dev = NULL;
 +      int i, ret = 0;
 +
 +      /* First populate the real net device's IPv6 addresses */
 +      ret = update_dev_clip(dev, dev);
 +      if (ret)
 +              return ret;
 +
 +      /* Parse all bond and vlan devices layered on top of the physical dev */
 +      for (i = 0; i < VLAN_N_VID; i++) {
 +              root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
 +              if (!root_dev)
 +                      continue;
 +
 +              ret = update_dev_clip(root_dev, dev);
 +              if (ret)
 +                      break;
 +      }
 +      return ret;
 +}
 +
 +static void update_clip(const struct adapter *adap)
 +{
 +      int i;
 +      struct net_device *dev;
 +      int ret;
 +
 +      rcu_read_lock();
 +
 +      for (i = 0; i < MAX_NPORTS; i++) {
 +              dev = adap->port[i];
 +              ret = 0;
 +
 +              if (dev)
 +                      ret = update_root_dev_clip(dev);
 +
 +              if (ret < 0)
 +                      break;
 +      }
 +      rcu_read_unlock();
 +}
 +
  /**
   *    cxgb_up - enable the adapter
   *    @adap: adapter being enabled
@@@ -4134,7 -3854,6 +4134,7 @@@ static int cxgb_up(struct adapter *adap
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
        notify_ulds(adap, CXGB4_STATE_UP);
 +      update_clip(adap);
   out:
        return err;
   irq_err:
@@@ -6151,15 -5870,11 +6151,15 @@@ static int __init cxgb4_init_module(voi
        ret = pci_register_driver(&cxgb4_driver);
        if (ret < 0)
                debugfs_remove(cxgb4_debugfs_root);
 +
 +      register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 +
        return ret;
  }
  
  static void __exit cxgb4_cleanup_module(void)
  {
 +      unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
        flush_workqueue(workq);
index a1818dae47b653c6a13a61fc608608fdd0037ade,3c51b774a65c534a13cd85b0b7ce0491fd10c486..3ca00e05f23d2ade287d6c1d9047410eeac43114
@@@ -11,7 -11,6 +11,7 @@@
  #include <linux/ipv6.h>
  #include <linux/ethtool.h>
  #include <linux/interrupt.h>
 +#include <linux/aer.h>
  
  #define QLCNIC_MAX_TX_QUEUES          1
  #define RSS_HASHTYPE_IP_TCP           0x3
@@@ -68,8 -67,6 +68,8 @@@ static const struct qlcnic_mailbox_meta
        {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
        {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
        {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
 +      {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
 +      {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
  };
  
  const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@@ -152,7 -149,7 +152,7 @@@ static struct qlcnic_hardware_ops qlcni
        .get_mac_address                = qlcnic_83xx_get_mac_address,
        .setup_intr                     = qlcnic_83xx_setup_intr,
        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
 -      .mbx_cmd                        = qlcnic_83xx_mbx_op,
 +      .mbx_cmd                        = qlcnic_83xx_issue_cmd,
        .get_func_no                    = qlcnic_83xx_get_func_no,
        .api_lock                       = qlcnic_83xx_cam_lock,
        .api_unlock                     = qlcnic_83xx_cam_unlock,
        .get_board_info                 = qlcnic_83xx_get_port_info,
        .set_mac_filter_count           = qlcnic_83xx_set_mac_filter_count,
        .free_mac_list                  = qlcnic_82xx_free_mac_list,
 +      .io_error_detected              = qlcnic_83xx_io_error_detected,
 +      .io_slot_reset                  = qlcnic_83xx_io_slot_reset,
 +      .io_resume                      = qlcnic_83xx_io_resume,
 +
  };
  
  static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@@ -235,17 -228,17 +235,17 @@@ static int __qlcnic_set_win_base(struc
        return 0;
  }
  
 -int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
 +int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
 +                              int *err)
  {
 -      int ret;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
  
 -      ret = __qlcnic_set_win_base(adapter, (u32) addr);
 -      if (!ret) {
 +      *err = __qlcnic_set_win_base(adapter, (u32) addr);
 +      if (!*err) {
                return QLCRDX(ahw, QLCNIC_WILDCARD);
        } else {
                dev_err(&adapter->pdev->dev,
 -                      "%s failed, addr = 0x%x\n", __func__, (int)addr);
 +                      "%s failed, addr = 0x%lx\n", __func__, addr);
                return -EIO;
        }
  }
@@@ -268,7 -261,7 +268,7 @@@ int qlcnic_83xx_wrt_reg_indirect(struc
        }
  }
  
 -int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
 +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
  {
        int err, i, num_msix;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@@ -369,10 -362,6 +369,10 @@@ static inline void qlcnic_83xx_get_mbx_
                                     struct qlcnic_cmd_args *cmd)
  {
        int i;
 +
 +      if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
 +              return;
 +
        for (i = 0; i < cmd->rsp.num; i++)
                cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
  }
@@@ -409,33 -398,24 +409,33 @@@ irqreturn_t qlcnic_83xx_clear_legacy_in
        return IRQ_HANDLED;
  }
  
 +static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
 +{
 +      atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
 +      complete(&mbx->completion);
 +}
 +
  static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
  {
 -      u32 resp, event;
 +      u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        unsigned long flags;
  
 -      spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 -
 +      spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
        if (!(resp & QLCNIC_SET_OWNER))
                goto out;
  
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
 -      if (event &  QLCNIC_MBX_ASYNC_EVENT)
 +      if (event &  QLCNIC_MBX_ASYNC_EVENT) {
                __qlcnic_83xx_process_aen(adapter);
 -
 +      } else {
 +              if (atomic_read(&mbx->rsp_status) != rsp_status)
 +                      qlcnic_83xx_notify_mbx_response(mbx);
 +      }
  out:
        qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 +      spin_unlock_irqrestore(&mbx->aen_lock, flags);
  }
  
  irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@@ -535,7 -515,7 +535,7 @@@ int qlcnic_83xx_setup_mbx_intr(struct q
        }
  
        /* Enable mailbox interrupt */
 -      qlcnic_83xx_enable_mbx_intrpt(adapter);
 +      qlcnic_83xx_enable_mbx_interrupt(adapter);
  
        return err;
  }
@@@ -581,7 -561,7 +581,7 @@@ void qlcnic_83xx_cam_unlock(struct qlcn
  void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
                          loff_t offset, size_t size)
  {
 -      int ret;
 +      int ret = 0;
        u32 data;
  
        if (qlcnic_api_lock(adapter)) {
                return;
        }
  
 -      ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
 +      data = QLCRD32(adapter, (u32) offset, &ret);
        qlcnic_api_unlock(adapter);
  
        if (ret == -EIO) {
                        __func__, (u32)offset);
                return;
        }
 -      data = ret;
        memcpy(buf, &data, size);
  }
  
@@@ -648,7 -629,7 +648,7 @@@ void qlcnic_83xx_set_mac_filter_count(s
        ahw->max_uc_count = count;
  }
  
 -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
 +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
  {
        u32 val;
  
@@@ -702,14 -683,11 +702,14 @@@ static void qlcnic_83xx_handle_link_aen
  static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
                                            u32 data[]);
  
 -static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
 -                          struct qlcnic_cmd_args *cmd)
 +void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
 +                   struct qlcnic_cmd_args *cmd)
  {
        int i;
  
 +      if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
 +              return;
 +
        dev_info(&adapter->pdev->dev,
                 "Host MBX regs(%d)\n", cmd->req.num);
        for (i = 0; i < cmd->req.num; i++) {
        pr_info("\n");
  }
  
 -/* Mailbox response for mac rcode */
 -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
 +static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
 +                                              struct qlcnic_cmd_args *cmd)
  {
 -      u32 fw_data;
 -      u8 mac_cmd_rcode;
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      int opcode = LSW(cmd->req.arg[0]);
 +      unsigned long max_loops;
  
 -      fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
 -      mac_cmd_rcode = (u8)fw_data;
 -      if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
 -          mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
 -          mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
 -              return QLCNIC_RCODE_SUCCESS;
 -      return 1;
 -}
 +      max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
  
 -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
 -{
 -      u32 data;
 -      struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      /* wait for mailbox completion */
 -      do {
 -              data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
 -              if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
 -                      data = QLCNIC_RCODE_TIMEOUT;
 -                      break;
 -              }
 -              mdelay(1);
 -      } while (!data);
 -      return data;
 +      for (; max_loops; max_loops--) {
 +              if (atomic_read(&cmd->rsp_status) ==
 +                  QLC_83XX_MBX_RESPONSE_ARRIVED)
 +                      return;
 +
 +              udelay(1);
 +      }
 +
 +      dev_err(&adapter->pdev->dev,
 +              "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +              __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
 +      flush_workqueue(ahw->mailbox->work_q);
 +      return;
  }
  
 -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
 -                     struct qlcnic_cmd_args *cmd)
 +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
 +                        struct qlcnic_cmd_args *cmd)
  {
 -      int i;
 -      u16 opcode;
 -      u8 mbx_err_code;
 -      unsigned long flags;
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
 +      int cmd_type, err, opcode;
 +      unsigned long timeout;
  
        opcode = LSW(cmd->req.arg[0]);
 -      if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 -              dev_info(&adapter->pdev->dev,
 -                       "Mailbox cmd attempted, 0x%x\n", opcode);
 -              dev_info(&adapter->pdev->dev, "Mailbox detached\n");
 -              return 0;
 +      cmd_type = cmd->type;
 +      err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
 +      if (err) {
 +              dev_err(&adapter->pdev->dev,
 +                      "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                      __func__, opcode, cmd->type, ahw->pci_func,
 +                      ahw->op_mode);
 +              return err;
        }
  
 -      spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 -      mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 -
 -      if (mbx_val) {
 -              QLCDB(adapter, DRV,
 -                    "Mailbox cmd attempted, 0x%x\n", opcode);
 -              QLCDB(adapter, DRV,
 -                    "Mailbox not available, 0x%x, collect FW dump\n",
 -                    mbx_val);
 -              cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
 -              spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -              return cmd->rsp.arg[0];
 -      }
 -
 -      /* Fill in mailbox registers */
 -      mbx_cmd = cmd->req.arg[0];
 -      writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 -      for (i = 1; i < cmd->req.num; i++)
 -              writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
 -
 -      /* Signal FW about the impending command */
 -      QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 -poll:
 -      rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 -      if (rsp != QLCNIC_RCODE_TIMEOUT) {
 -              /* Get the FW response data */
 -              fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 -              if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
 -                      __qlcnic_83xx_process_aen(adapter);
 -                      goto poll;
 -              }
 -              mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 -              rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
 -              opcode = QLCNIC_MBX_RSP(fw_data);
 -              qlcnic_83xx_get_mbx_data(adapter, cmd);
 -
 -              switch (mbx_err_code) {
 -              case QLCNIC_MBX_RSP_OK:
 -              case QLCNIC_MBX_PORT_RSP_OK:
 -                      rsp = QLCNIC_RCODE_SUCCESS;
 -                      break;
 -              default:
 -                      if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 -                              rsp = qlcnic_83xx_mac_rcode(adapter);
 -                              if (!rsp)
 -                                      goto out;
 -                      }
 +      switch (cmd_type) {
 +      case QLC_83XX_MBX_CMD_WAIT:
 +              if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
                        dev_err(&adapter->pdev->dev,
 -                              "MBX command 0x%x failed with err:0x%x\n",
 -                              opcode, mbx_err_code);
 -                      rsp = mbx_err_code;
 -                      qlcnic_dump_mbx(adapter, cmd);
 -                      break;
 +                              "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                              __func__, opcode, cmd_type, ahw->pci_func,
 +                              ahw->op_mode);
 +                      flush_workqueue(mbx->work_q);
                }
 -              goto out;
 +              break;
 +      case QLC_83XX_MBX_CMD_NO_WAIT:
 +              return 0;
 +      case QLC_83XX_MBX_CMD_BUSY_WAIT:
 +              qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
 +              break;
 +      default:
 +              dev_err(&adapter->pdev->dev,
 +                      "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
 +                      __func__, opcode, cmd_type, ahw->pci_func,
 +                      ahw->op_mode);
 +              qlcnic_83xx_detach_mailbox_work(adapter);
        }
  
 -      dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
 -              QLCNIC_MBX_RSP(mbx_cmd));
 -      rsp = QLCNIC_RCODE_TIMEOUT;
 -out:
 -      /* clear fw mbx control register */
 -      QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -      return rsp;
 +      return cmd->rsp_opcode;
  }
  
  int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
        u32 temp;
        const struct qlcnic_mailbox_metadata *mbx_tbl;
  
 +      memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
        mbx_tbl = qlcnic_83xx_mbx_tbl;
        size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
        for (i = 0; i < size; i++) {
                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        temp = adapter->ahw->fw_hal_version << 29;
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
 +                      mbx->cmd_op = type;
                        return 0;
                }
        }
@@@ -866,9 -889,9 +866,9 @@@ static void qlcnic_83xx_handle_idc_comp
  
  void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
  {
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 event[QLC_83XX_MBX_AEN_CNT];
        int i;
 -      struct qlcnic_hardware_context *ahw = adapter->ahw;
  
        for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
                event[i] = readl(QLCNIC_MBX_FW(ahw, i));
                                   &adapter->idc_aen_work, 0);
                break;
        case QLCNIC_MBX_TIME_EXTEND_EVENT:
 +              ahw->extend_lb_time = event[1] >> 8 & 0xf;
                break;
        case QLCNIC_MBX_BC_EVENT:
                qlcnic_sriov_handle_bc_event(adapter, event[1]);
                dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
                         QLCNIC_MBX_RSP(event[0]));
                break;
 +      case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
 +              qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
 +              break;
        default:
                dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
                        QLCNIC_MBX_RSP(event[0]));
  
  static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
  {
 +      u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 -      u32 resp, event;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
        unsigned long flags;
  
 -      spin_lock_irqsave(&ahw->mbx_lock, flags);
 -
 +      spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
        if (resp & QLCNIC_SET_OWNER) {
                event = readl(QLCNIC_MBX_FW(ahw, 0));
 -              if (event &  QLCNIC_MBX_ASYNC_EVENT)
 +              if (event &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
 +              } else {
 +                      if (atomic_read(&mbx->rsp_status) != rsp_status)
 +                              qlcnic_83xx_notify_mbx_response(mbx);
 +              }
        }
 -
 -      spin_unlock_irqrestore(&ahw->mbx_lock, flags);
 +      spin_unlock_irqrestore(&mbx->aen_lock, flags);
  }
  
  static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@@ -954,7 -970,6 +954,7 @@@ void qlcnic_83xx_enable_mbx_poll(struc
                return;
  
        INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
 +      queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
  }
  
  void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@@ -1341,10 -1356,8 +1341,10 @@@ static int qlcnic_83xx_diag_alloc_res(s
  
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                /* disable and free mailbox interrupt */
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +              if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
 +                      qlcnic_83xx_enable_mbx_poll(adapter);
                        qlcnic_83xx_free_mbx_intr(adapter);
 +              }
                adapter->ahw->loopback_state = 0;
                adapter->ahw->hw_ops->setup_link_event(adapter, 1);
        }
@@@ -1365,8 -1378,6 +1365,8 @@@ static void qlcnic_83xx_diag_free_res(s
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_83xx_disable_intr(adapter, sds_ring);
 +                      if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 +                              qlcnic_83xx_enable_mbx_poll(adapter);
                }
        }
  
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
                        err = qlcnic_83xx_setup_mbx_intr(adapter);
 +                      qlcnic_83xx_disable_mbx_poll(adapter);
                        if (err) {
                                dev_err(&adapter->pdev->dev,
                                        "%s: failed to setup mbx interrupt\n",
  
        if (netif_running(netdev))
                __qlcnic_up(adapter, netdev);
 +
 +      if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
 +          !(adapter->flags & QLCNIC_MSIX_ENABLED))
 +              qlcnic_83xx_disable_mbx_poll(adapter);
  out:
        netif_device_attach(netdev);
  }
@@@ -1614,33 -1620,26 +1614,33 @@@ static void qlcnic_83xx_set_interface_i
  
  int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
  {
 -      int err;
 +      struct qlcnic_cmd_args *cmd = NULL;
        u32 temp = 0;
 -      struct qlcnic_cmd_args cmd;
 +      int err;
  
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
  
 -      err = qlcnic_alloc_mbx_args(&cmd, adapter,
 +      cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 +      if (!cmd)
 +              return -ENOMEM;
 +
 +      err = qlcnic_alloc_mbx_args(cmd, adapter,
                                    QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
        if (err)
 -              return err;
 +              goto out;
  
 +      cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
        qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
 -      cmd.req.arg[1] = (mode ? 1 : 0) | temp;
 -      err = qlcnic_issue_cmd(adapter, &cmd);
 -      if (err)
 -              dev_info(&adapter->pdev->dev,
 -                       "Promiscuous mode config failed\n");
 +      cmd->req.arg[1] = (mode ? 1 : 0) | temp;
 +      err = qlcnic_issue_cmd(adapter, cmd);
 +      if (!err)
 +              return err;
  
 -      qlcnic_free_mbx_args(&cmd);
 +      qlcnic_free_mbx_args(cmd);
 +
 +out:
 +      kfree(cmd);
        return err;
  }
  
@@@ -1653,7 -1652,7 +1653,7 @@@ int qlcnic_83xx_loopback_test(struct ne
        if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
                netdev_warn(netdev,
                            "Loopback test not supported in non privileged mode\n");
 -              return ret;
 +              return -ENOTSUPP;
        }
  
        if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
        /* Poll for link up event before running traffic */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 -                      qlcnic_83xx_process_aen(adapter);
  
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
 -                      ret = -EIO;
 +                      ret = -EBUSY;
                        goto free_diag_res;
                }
                if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
                        netdev_info(netdev,
                                    "Firmware didn't sent link up event to loopback request\n");
 -                      ret = -QLCNIC_FW_NOT_RESPOND;
 +                      ret = -ETIMEDOUT;
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
                        goto free_diag_res;
                }
        /* Make sure carrier is off and queue is stopped during loopback */
        if (netif_running(netdev)) {
                netif_carrier_off(netdev);
 -              netif_stop_queue(netdev);
 +              netif_tx_stop_all_queues(netdev);
        }
  
        ret = qlcnic_do_lb_test(adapter, mode);
@@@ -1716,42 -1717,18 +1716,42 @@@ fail_diag_alloc
        return ret;
  }
  
 +static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
 +                                           u32 *max_wait_count)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      int temp;
 +
 +      netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
 +                  ahw->extend_lb_time);
 +      temp = ahw->extend_lb_time * 1000;
 +      *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
 +      ahw->extend_lb_time = 0;
 +}
 +
  int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct net_device *netdev = adapter->netdev;
 +      u32 config, max_wait_count;
        int status = 0, loop = 0;
 -      u32 config;
  
 +      ahw->extend_lb_time = 0;
 +      max_wait_count = QLC_83XX_LB_WAIT_COUNT;
        status = qlcnic_83xx_get_port_config(adapter);
        if (status)
                return status;
  
        config = ahw->port_config;
 +
 +      /* Check if port is already in loopback mode */
 +      if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
 +          (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
 +              netdev_err(netdev,
 +                         "Port already in Loopback mode.\n");
 +              return -EINPROGRESS;
 +      }
 +
        set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
  
        if (mode == QLCNIC_ILB_MODE)
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 -                      qlcnic_83xx_process_aen(adapter);
  
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 -                      return -EIO;
 +                      return -EBUSY;
                }
 -              if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
 -                      netdev_err(netdev,
 -                                 "Did not receive IDC completion AEN\n");
 +
 +              if (ahw->extend_lb_time)
 +                      qlcnic_extend_lb_idc_cmpltn_wait(adapter,
 +                                                       &max_wait_count);
 +
 +              if (loop++ > max_wait_count) {
 +                      netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
 +                                 __func__);
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
 -                      return -EIO;
 +                      return -ETIMEDOUT;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
  
  int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
  {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      u32 config = ahw->port_config, max_wait_count;
        struct net_device *netdev = adapter->netdev;
        int status = 0, loop = 0;
 -      u32 config = ahw->port_config;
  
 +      ahw->extend_lb_time = 0;
 +      max_wait_count = QLC_83XX_LB_WAIT_COUNT;
        set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
        if (mode == QLCNIC_ILB_MODE)
                ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
 -              if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
 -                      qlcnic_83xx_process_aen(adapter);
  
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 -                      return -EIO;
 +                      return -EBUSY;
                }
  
 -              if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
 -                      netdev_err(netdev,
 -                                 "Did not receive IDC completion AEN\n");
 +              if (ahw->extend_lb_time)
 +                      qlcnic_extend_lb_idc_cmpltn_wait(adapter,
 +                                                       &max_wait_count);
 +
 +              if (loop++ > max_wait_count) {
 +                      netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
 +                                 __func__);
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 -                      return -EIO;
 +                      return -ETIMEDOUT;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
  
@@@ -1981,31 -1951,25 +1981,31 @@@ static void qlcnic_83xx_set_interface_i
  int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
                                   u16 vlan_id, u8 op)
  {
 -      int err;
 -      u32 *buf, temp = 0;
 -      struct qlcnic_cmd_args cmd;
 +      struct qlcnic_cmd_args *cmd = NULL;
        struct qlcnic_macvlan_mbx mv;
 +      u32 *buf, temp = 0;
 +      int err;
  
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
  
 -      err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
 +      cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 +      if (!cmd)
 +              return -ENOMEM;
 +
 +      err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
        if (err)
 -              return err;
 +              goto out;
 +
 +      cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
  
        if (vlan_id)
                op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
                     QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
  
 -      cmd.req.arg[1] = op | (1 << 8);
 +      cmd->req.arg[1] = op | (1 << 8);
        qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
 -      cmd.req.arg[1] |= temp;
 +      cmd->req.arg[1] |= temp;
        mv.vlan = vlan_id;
        mv.mac_addr0 = addr[0];
        mv.mac_addr1 = addr[1];
        mv.mac_addr3 = addr[3];
        mv.mac_addr4 = addr[4];
        mv.mac_addr5 = addr[5];
 -      buf = &cmd.req.arg[2];
 +      buf = &cmd->req.arg[2];
        memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
 -      err = qlcnic_issue_cmd(adapter, &cmd);
 -      if (err)
 -              dev_err(&adapter->pdev->dev,
 -                      "MAC-VLAN %s to CAM failed, err=%d.\n",
 -                      ((op == 1) ? "add " : "delete "), err);
 -      qlcnic_free_mbx_args(&cmd);
 +      err = qlcnic_issue_cmd(adapter, cmd);
 +      if (!err)
 +              return err;
 +
 +      qlcnic_free_mbx_args(cmd);
 +out:
 +      kfree(cmd);
        return err;
  }
  
@@@ -2046,14 -2009,12 +2046,14 @@@ void qlcnic_83xx_configure_mac(struct q
        cmd->req.arg[1] = type;
  }
  
 -int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
 +int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
 +                              u8 function)
  {
        int err, i;
        struct qlcnic_cmd_args cmd;
        u32 mac_low, mac_high;
  
 +      function = 0;
        err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
        if (err)
                return err;
@@@ -2114,37 -2075,28 +2114,37 @@@ void qlcnic_83xx_config_intr_coal(struc
  static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
                                        u32 data[])
  {
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
        u8 link_status, duplex;
        /* link speed */
        link_status = LSB(data[3]) & 1;
 -      adapter->ahw->link_speed = MSW(data[2]);
 -      adapter->ahw->link_autoneg = MSB(MSW(data[3]));
 -      adapter->ahw->module_type = MSB(LSW(data[3]));
 -      duplex = LSB(MSW(data[3]));
 -      if (duplex)
 -              adapter->ahw->link_duplex = DUPLEX_FULL;
 -      else
 -              adapter->ahw->link_duplex = DUPLEX_HALF;
 -      adapter->ahw->has_link_events = 1;
 +      if (link_status) {
 +              ahw->link_speed = MSW(data[2]);
 +              duplex = LSB(MSW(data[3]));
 +              if (duplex)
 +                      ahw->link_duplex = DUPLEX_FULL;
 +              else
 +                      ahw->link_duplex = DUPLEX_HALF;
 +      } else {
 +              ahw->link_speed = SPEED_UNKNOWN;
 +              ahw->link_duplex = DUPLEX_UNKNOWN;
 +      }
 +
 +      ahw->link_autoneg = MSB(MSW(data[3]));
 +      ahw->module_type = MSB(LSW(data[3]));
 +      ahw->has_link_events = 1;
        qlcnic_advert_link_change(adapter, link_status);
  }
  
  irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
  {
        struct qlcnic_adapter *adapter = data;
 -      unsigned long flags;
 +      struct qlcnic_mailbox *mbx;
        u32 mask, resp, event;
 +      unsigned long flags;
  
 -      spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
 +      mbx = adapter->ahw->mailbox;
 +      spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
        if (!(resp & QLCNIC_SET_OWNER))
                goto out;
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
        if (event &  QLCNIC_MBX_ASYNC_EVENT)
                __qlcnic_83xx_process_aen(adapter);
 +      else
 +              qlcnic_83xx_notify_mbx_response(mbx);
 +
  out:
        mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
        writel(0, adapter->ahw->pci_base0 + mask);
 -      spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 -
 +      spin_unlock_irqrestore(&mbx->aen_lock, flags);
        return IRQ_HANDLED;
  }
  
@@@ -2331,7 -2281,7 +2331,7 @@@ int qlcnic_83xx_get_pci_info(struct qlc
                                         pci_info->tx_max_bw, pci_info->mac);
                }
                if (ahw->op_mode == QLCNIC_MGMT_FUNC)
 -                      dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
 +                      dev_info(dev, "Max functions = %d, active functions = %d\n",
                                 ahw->max_pci_func, ahw->act_pci_func);
  
        } else {
@@@ -2434,9 -2384,9 +2434,9 @@@ int qlcnic_83xx_lockless_flash_read32(s
                                      u32 flash_addr, u8 *p_data,
                                      int count)
  {
 -      int i, ret;
 -      u32 word, range, flash_offset, addr = flash_addr;
 +      u32 word, range, flash_offset, addr = flash_addr, ret;
        ulong indirect_add, direct_window;
 +      int i, err = 0;
  
        flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
        if (addr & 0x3) {
                /* Multi sector read */
                for (i = 0; i < count; i++) {
                        indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
 -                      ret = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                                        indirect_add);
 -                      if (ret == -EIO)
 -                              return -EIO;
 +                      ret = QLCRD32(adapter, indirect_add, &err);
 +                      if (err == -EIO)
 +                              return err;
  
                        word = ret;
                        *(u32 *)p_data  = word;
                /* Single sector read */
                for (i = 0; i < count; i++) {
                        indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
 -                      ret = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                                        indirect_add);
 -                      if (ret == -EIO)
 -                              return -EIO;
 +                      ret = QLCRD32(adapter, indirect_add, &err);
 +                      if (err == -EIO)
 +                              return err;
  
                        word = ret;
                        *(u32 *)p_data  = word;
@@@ -2495,13 -2447,10 +2495,13 @@@ static int qlcnic_83xx_poll_flash_statu
  {
        u32 status;
        int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
 +      int err = 0;
  
        do {
 -              status = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                                   QLC_83XX_FLASH_STATUS);
 +              status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
 +              if (err == -EIO)
 +                      return err;
 +
                if ((status & QLC_83XX_FLASH_STATUS_READY) ==
                    QLC_83XX_FLASH_STATUS_READY)
                        break;
@@@ -2553,8 -2502,7 +2553,8 @@@ int qlcnic_83xx_disable_flash_write(str
  
  int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
  {
 -      int ret, mfg_id;
 +      int ret, err = 0;
 +      u32 mfg_id;
  
        if (qlcnic_83xx_lock_flash(adapter))
                return -EIO;
                return -EIO;
        }
  
 -      mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
 -      if (mfg_id == -EIO)
 -              return -EIO;
 +      mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
 +      if (err == -EIO) {
 +              qlcnic_83xx_unlock_flash(adapter);
 +              return err;
 +      }
  
        adapter->flash_mfg_id = (mfg_id & 0xFF);
        qlcnic_83xx_unlock_flash(adapter);
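
Most hunks in this file repeat one mechanical conversion: the old qlcnic_83xx_rd_reg_indirect() folded -EIO into its return value, which is ambiguous because a register can legitimately hold that bit pattern, while QLCRD32() reports failure through an out parameter. A minimal sketch of the new call pattern (REG_ADDR is a hypothetical placeholder):

        u32 val;
        int err = 0;

        val = QLCRD32(adapter, REG_ADDR, &err); /* value via return, status via &err */
        if (err == -EIO)
                return err;                     /* val must not be used on error */
        /* ... use val ... */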
@@@ -2690,7 -2636,7 +2690,7 @@@ int qlcnic_83xx_flash_bulk_write(struc
                                 u32 *p_data, int count)
  {
        u32 temp;
 -      int ret = -EIO;
 +      int ret = -EIO, err = 0;
  
        if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
            (count > QLC_83XX_FLASH_WRITE_MAX)) {
                return -EIO;
        }
  
 -      temp = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                         QLC_83XX_FLASH_SPI_CONTROL);
 +      temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
 +      if (err == -EIO)
 +              return err;
 +
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
                                     (temp | QLC_83XX_FLASH_SPI_CTRL));
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
                return -EIO;
        }
  
 -      ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
 +      ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
 +      if (err == -EIO)
 +              return err;
 +
        if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
                dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
                        __func__, __LINE__);
                /* Operation failed, clear error bit */
 -              temp = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                                 QLC_83XX_FLASH_SPI_CONTROL);
 +              temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
 +              if (err == -EIO)
 +                      return err;
 +
                qlcnic_83xx_wrt_reg_indirect(adapter,
                                             QLC_83XX_FLASH_SPI_CONTROL,
                                             (temp | QLC_83XX_FLASH_SPI_CTRL));
@@@ -2884,7 -2823,6 +2884,7 @@@ int qlcnic_83xx_ms_mem_write128(struct 
  {
        int i, j, ret = 0;
        u32 temp;
 +      int err = 0;
  
        /* Check alignment */
        if (addr & 0xF)
                                             QLCNIC_TA_WRITE_START);
  
                for (j = 0; j < MAX_CTL_CHECK; j++) {
 -                      temp = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                                         QLCNIC_MS_CTRL);
 +                      temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
 +                      if (err == -EIO) {
 +                              mutex_unlock(&adapter->ahw->mem_lock);
 +                              return err;
 +                      }
 +
                        if ((temp & TA_CTL_BUSY) == 0)
                                break;
                }
  int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
                             u8 *p_data, int count)
  {
 -      int i, ret;
 -      u32 word, addr = flash_addr;
 +      u32 word, addr = flash_addr, ret;
        ulong  indirect_addr;
 +      int i, err = 0;
  
        if (qlcnic_83xx_lock_flash(adapter) != 0)
                return -EIO;
                }
  
                indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
 -              ret = qlcnic_83xx_rd_reg_indirect(adapter,
 -                                                indirect_addr);
 -              if (ret == -EIO)
 -                      return -EIO;
 +              ret = QLCRD32(adapter, indirect_addr, &err);
 +              if (err == -EIO)
 +                      return err;
 +
                word = ret;
                *(u32 *)p_data  = word;
                p_data = p_data + 4;
@@@ -3080,8 -3014,8 +3080,8 @@@ int qlcnic_83xx_get_settings(struct qlc
        }
  
        if (ahw->port_type == QLCNIC_XGBE) {
 -              ecmd->supported = SUPPORTED_1000baseT_Full;
 -              ecmd->advertising = ADVERTISED_1000baseT_Full;
 +              ecmd->supported = SUPPORTED_10000baseT_Full;
 +              ecmd->advertising = ADVERTISED_10000baseT_Full;
        } else {
                ecmd->supported = (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
@@@ -3148,7 -3082,7 +3148,7 @@@ int qlcnic_83xx_set_settings(struct qlc
        status = qlcnic_83xx_set_port_config(adapter);
        if (status) {
                dev_info(&adapter->pdev->dev,
-                        "Faild to Set Link Speed and autoneg.\n");
+                        "Failed to Set Link Speed and autoneg.\n");
                adapter->ahw->port_config = config;
        }
        return status;
@@@ -3310,11 -3244,6 +3310,11 @@@ int qlcnic_83xx_interrupt_test(struct n
        u8 val;
        int ret, max_sds_rings = adapter->max_sds_rings;
  
 +      if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
 +              netdev_info(netdev, "Device is resetting\n");
 +              return -EBUSY;
 +      }
 +
        if (qlcnic_get_diag_lock(adapter)) {
                netdev_info(netdev, "Device in diagnostics mode\n");
                return -EBUSY;
@@@ -3440,8 -3369,7 +3440,8 @@@ int qlcnic_83xx_set_pauseparam(struct q
  
  static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
  {
 -      int ret;
 +      int ret, err = 0;
 +      u32 temp;
  
        qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
                                     QLC_83XX_FLASH_OEM_READ_SIG);
        if (ret)
                return -EIO;
  
 -      ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
 -      return ret & 0xFF;
 +      temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
 +      if (err == -EIO)
 +              return err;
 +
 +      return temp & 0xFF;
  }
  
  int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
@@@ -3521,360 -3446,3 +3521,360 @@@ int qlcnic_83xx_resume(struct qlcnic_ad
                             idc->delay);
        return err;
  }
 +
 +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
 +{
 +      INIT_COMPLETION(mbx->completion);
 +      set_bit(QLC_83XX_MBX_READY, &mbx->status);
 +}
 +
 +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
 +{
 +      destroy_workqueue(mbx->work_q);
 +      kfree(mbx);
 +}
 +
 +static inline void
 +qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
 +                                struct qlcnic_cmd_args *cmd)
 +{
 +      atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
 +
 +      if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
 +              qlcnic_free_mbx_args(cmd);
 +              kfree(cmd);
 +              return;
 +      }
 +      complete(&cmd->completion);
 +}
 +
 +static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +      struct list_head *head = &mbx->cmd_q;
 +      struct qlcnic_cmd_args *cmd = NULL;
 +
 +      spin_lock(&mbx->queue_lock);
 +
 +      while (!list_empty(head)) {
 +              cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
 +              dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
 +                       __func__, cmd->cmd_op);
 +              list_del(&cmd->list);
 +              mbx->num_cmds--;
 +              qlcnic_83xx_notify_cmd_completion(adapter, cmd);
 +      }
 +
 +      spin_unlock(&mbx->queue_lock);
 +}
 +
 +static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx = ahw->mailbox;
 +      u32 host_mbx_ctrl;
 +
 +      if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
 +              return -EBUSY;
 +
 +      host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 +      if (host_mbx_ctrl) {
 +              clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 +              ahw->idc.collect_dump = 1;
 +              return -EIO;
 +      }
 +
 +      return 0;
 +}
 +
 +static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                            u8 issue_cmd)
 +{
 +      if (issue_cmd)
 +              QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 +      else
 +              QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 +}
 +
 +static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                      struct qlcnic_cmd_args *cmd)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +
 +      spin_lock(&mbx->queue_lock);
 +
 +      list_del(&cmd->list);
 +      mbx->num_cmds--;
 +
 +      spin_unlock(&mbx->queue_lock);
 +
 +      qlcnic_83xx_notify_cmd_completion(adapter, cmd);
 +}
 +
 +static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                     struct qlcnic_cmd_args *cmd)
 +{
 +      u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      int i, j;
 +
 +      if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
 +              mbx_cmd = cmd->req.arg[0];
 +              writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 +              for (i = 1; i < cmd->req.num; i++)
 +                      writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
 +      } else {
 +              fw_hal_version = ahw->fw_hal_version;
 +              hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
 +              total_size = cmd->pay_size + hdr_size;
 +              tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
 +              mbx_cmd = tmp | fw_hal_version << 29;
 +              writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 +
 +              /* Back channel specific operations bits */
 +              mbx_cmd = 0x1 | 1 << 4;
 +
 +              if (qlcnic_sriov_pf_check(adapter))
 +                      mbx_cmd |= cmd->func_num << 5;
 +
 +              writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
 +
 +              for (i = 2, j = 0; j < hdr_size; i++, j++)
 +                      writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
 +              for (j = 0; j < cmd->pay_size; j++, i++)
 +                      writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
 +      }
 +}
 +
 +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +
 +      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 +      complete(&mbx->completion);
 +      cancel_work_sync(&mbx->work);
 +      flush_workqueue(mbx->work_q);
 +      qlcnic_83xx_flush_mbx_queue(adapter);
 +}
 +
 +static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
 +                                     struct qlcnic_cmd_args *cmd,
 +                                     unsigned long *timeout)
 +{
 +      struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
 +
 +      if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
 +              atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
 +              init_completion(&cmd->completion);
 +              cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
 +
 +              spin_lock(&mbx->queue_lock);
 +
 +              list_add_tail(&cmd->list, &mbx->cmd_q);
 +              mbx->num_cmds++;
 +              cmd->total_cmds = mbx->num_cmds;
 +              *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
 +              queue_work(mbx->work_q, &mbx->work);
 +
 +              spin_unlock(&mbx->queue_lock);
 +
 +              return 0;
 +      }
 +
 +      return -EBUSY;
 +}
 +
 +static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
 +                                     struct qlcnic_cmd_args *cmd)
 +{
 +      u8 mac_cmd_rcode;
 +      u32 fw_data;
 +
 +      if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 +              fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
 +              mac_cmd_rcode = (u8)fw_data;
 +              if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
 +                  mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
 +                  mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
 +                      cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
 +                      return QLCNIC_RCODE_SUCCESS;
 +              }
 +      }
 +
 +      return -EINVAL;
 +}
 +
 +static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
 +                                     struct qlcnic_cmd_args *cmd)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct device *dev = &adapter->pdev->dev;
 +      u8 mbx_err_code;
 +      u32 fw_data;
 +
 +      fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 +      mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 +      qlcnic_83xx_get_mbx_data(adapter, cmd);
 +
 +      switch (mbx_err_code) {
 +      case QLCNIC_MBX_RSP_OK:
 +      case QLCNIC_MBX_PORT_RSP_OK:
 +              cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
 +              break;
 +      default:
 +              if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
 +                      break;
 +
 +              dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
 +                      __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
 +                      ahw->op_mode, mbx_err_code);
 +              cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
 +              qlcnic_dump_mbx(adapter, cmd);
 +      }
 +
 +      return;
 +}
 +
 +static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 +{
 +      struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
 +                                                work);
 +      struct qlcnic_adapter *adapter = mbx->adapter;
 +      struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
 +      struct device *dev = &adapter->pdev->dev;
 +      atomic_t *rsp_status = &mbx->rsp_status;
 +      struct list_head *head = &mbx->cmd_q;
 +      struct qlcnic_hardware_context *ahw;
 +      struct qlcnic_cmd_args *cmd = NULL;
 +
 +      ahw = adapter->ahw;
 +
 +      while (true) {
 +              if (qlcnic_83xx_check_mbx_status(adapter)) {
 +                      qlcnic_83xx_flush_mbx_queue(adapter);
 +                      return;
 +              }
 +
 +              atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
 +
 +              spin_lock(&mbx->queue_lock);
 +
 +              if (list_empty(head)) {
 +                      spin_unlock(&mbx->queue_lock);
 +                      return;
 +              }
 +              cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
 +
 +              spin_unlock(&mbx->queue_lock);
 +
 +              mbx_ops->encode_cmd(adapter, cmd);
 +              mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
 +
 +              if (wait_for_completion_timeout(&mbx->completion,
 +                                              QLC_83XX_MBX_TIMEOUT)) {
 +                      mbx_ops->decode_resp(adapter, cmd);
 +                      mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
 +              } else {
 +                      dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
 +                              __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
 +                              ahw->op_mode);
 +                      clear_bit(QLC_83XX_MBX_READY, &mbx->status);
 +                      qlcnic_dump_mbx(adapter, cmd);
 +                      qlcnic_83xx_idc_request_reset(adapter,
 +                                                    QLCNIC_FORCE_FW_DUMP_KEY);
 +                      cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
 +              }
 +              mbx_ops->dequeue_cmd(adapter, cmd);
 +      }
 +}
 +
 +static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
 +      .enqueue_cmd    = qlcnic_83xx_enqueue_mbx_cmd,
 +      .dequeue_cmd    = qlcnic_83xx_dequeue_mbx_cmd,
 +      .decode_resp    = qlcnic_83xx_decode_mbx_rsp,
 +      .encode_cmd     = qlcnic_83xx_encode_mbx_cmd,
 +      .nofity_fw      = qlcnic_83xx_signal_mbx_cmd,
 +};
 +
 +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_mailbox *mbx;
 +
 +      ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
 +      if (!ahw->mailbox)
 +              return -ENOMEM;
 +
 +      mbx = ahw->mailbox;
 +      mbx->ops = &qlcnic_83xx_mbx_ops;
 +      mbx->adapter = adapter;
 +
 +      spin_lock_init(&mbx->queue_lock);
 +      spin_lock_init(&mbx->aen_lock);
 +      INIT_LIST_HEAD(&mbx->cmd_q);
 +      init_completion(&mbx->completion);
 +
 +      mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
 +      if (mbx->work_q == NULL) {
 +              kfree(mbx);
 +              return -ENOMEM;
 +      }
 +
 +      INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
 +      set_bit(QLC_83XX_MBX_READY, &mbx->status);
 +      return 0;
 +}
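
Taken together, the pieces above form a small command pipeline: qlcnic_83xx_init_mailbox_work() allocates the context and its single-threaded workqueue, enqueue_cmd queues a request and schedules the worker, the worker encodes the command into the mailbox registers and signals firmware through the nofity_fw hook (spelling as in the ops table above), and the interrupt path completes waiters via qlcnic_83xx_notify_mbx_response(). A hedged sketch of the caller side, assuming cmd was prepared by the driver's usual mailbox-args helpers (not shown in this diff):

        struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        unsigned long timeout;
        int err;

        err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
        if (err)
                return err;             /* -EBUSY: mailbox not ready */

        /* NO_WAIT commands are freed by the completion path itself; all
         * others block here until decode_resp()/dequeue_cmd() complete
         * the command or the per-queue timeout expires. */
        if (cmd->type != QLC_83XX_MBX_CMD_NO_WAIT)
                wait_for_completion_timeout(&cmd->completion, timeout);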
 +
 +pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
 +                                             pci_channel_state_t state)
 +{
 +      struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 +
 +      if (state == pci_channel_io_perm_failure)
 +              return PCI_ERS_RESULT_DISCONNECT;
 +
 +      if (state == pci_channel_io_normal)
 +              return PCI_ERS_RESULT_RECOVERED;
 +
 +      set_bit(__QLCNIC_AER, &adapter->state);
 +      set_bit(__QLCNIC_RESETTING, &adapter->state);
 +
 +      qlcnic_83xx_aer_stop_poll_work(adapter);
 +
 +      pci_save_state(pdev);
 +      pci_disable_device(pdev);
 +
 +      return PCI_ERS_RESULT_NEED_RESET;
 +}
 +
 +pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
 +{
 +      struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 +      int err = 0;
 +
 +      pdev->error_state = pci_channel_io_normal;
 +      err = pci_enable_device(pdev);
 +      if (err)
 +              goto disconnect;
 +
 +      pci_set_power_state(pdev, PCI_D0);
 +      pci_set_master(pdev);
 +      pci_restore_state(pdev);
 +
 +      err = qlcnic_83xx_aer_reset(adapter);
 +      if (err == 0)
 +              return PCI_ERS_RESULT_RECOVERED;
 +disconnect:
 +      clear_bit(__QLCNIC_AER, &adapter->state);
 +      clear_bit(__QLCNIC_RESETTING, &adapter->state);
 +      return PCI_ERS_RESULT_DISCONNECT;
 +}
 +
 +void qlcnic_83xx_io_resume(struct pci_dev *pdev)
 +{
 +      struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
 +
 +      pci_cleanup_aer_uncorrect_error_status(pdev);
 +      if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
 +              qlcnic_83xx_aer_start_poll_work(adapter);
 +}
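
These three callbacks follow the standard PCI AER recovery sequence (error_detected, then slot_reset, then resume). The actual registration happens elsewhere in the driver, outside this diff, so the wiring below is only an illustrative sketch:

static const struct pci_error_handlers qlcnic_83xx_err_handler = {
        .error_detected = qlcnic_83xx_io_error_detected,
        .slot_reset     = qlcnic_83xx_io_slot_reset,
        .resume         = qlcnic_83xx_io_resume,
};

/* hooked up as .err_handler in the driver's struct pci_driver */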
index b7b245b43b8703b6bdab2322ef538b0f0b97a9f6,2fe15c591b3a5bef8a3e48bc388b42fc38b255dc..11b4bb83b9308b2c4a9331ea737ee7766c074794
  struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
                                     struct qlcnic_host_rds_ring *, u16, u16);
  
 +inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
 +                                struct qlcnic_host_tx_ring *tx_ring)
 +{
 +      if (qlcnic_check_multi_tx(adapter) &&
 +          !adapter->ahw->diag_test)
 +              writel(0x0, tx_ring->crb_intr_mask);
 +}
 +

 +static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
 +                                       struct qlcnic_host_tx_ring *tx_ring)
 +{
 +      if (qlcnic_check_multi_tx(adapter) &&
 +          !adapter->ahw->diag_test)
 +              writel(1, tx_ring->crb_intr_mask);
 +}
 +
  inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
                                       struct qlcnic_host_tx_ring *tx_ring)
  {
@@@ -164,7 -147,10 +164,7 @@@ static inline u8 qlcnic_mac_hash(u64 ma
  static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
                                        u16 handle, u8 ring_id)
  {
 -      unsigned short device = adapter->pdev->device;
 -
 -      if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
 -          (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
 +      if (qlcnic_83xx_check(adapter))
                return handle | (ring_id << 15);
        else
                return handle;
@@@ -175,68 -161,36 +175,68 @@@ static inline int qlcnic_82xx_is_lb_pkt
        return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
  }
  
 +static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
 +                                    struct qlcnic_filter *fil,
 +                                    void *addr, u16 vlan_id)
 +{
 +      int ret;
 +      u8 op;
 +
 +      op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
 +      ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
 +      if (ret)
 +              return;
 +
 +      op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
 +      ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
 +      if (!ret) {
 +              hlist_del(&fil->fnode);
 +              adapter->rx_fhash.fnum--;
 +      }
 +}
 +
 +static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
 +                                                  void *addr, u16 vlan_id)
 +{
 +      struct qlcnic_filter *tmp_fil = NULL;
 +      struct hlist_node *n;
 +
 +      hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 +              if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
 +                  tmp_fil->vlan_id == vlan_id)
 +                      return tmp_fil;
 +      }
 +
 +      return NULL;
 +}
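
qlcnic_find_mac_filter() is a plain bucket scan; the bucket index is derived just below by hashing the 48-bit source MAC (copied into the low bytes of a u64) and masking with fbucket_size - 1, which only works because the bucket count is a power of two. A compressed sketch of the lookup path:

        u64 src_addr = 0;
        u8 hindex;

        memcpy(&src_addr, phdr->h_source, ETH_ALEN);  /* 6 MAC bytes -> low 48 bits */
        hindex = qlcnic_mac_hash(src_addr) &
                 (adapter->fhash.fbucket_size - 1);   /* power-of-two bucket count */
        tmp_fil = qlcnic_find_mac_filter(&adapter->fhash.fhead[hindex],
                                         &src_addr, vlan_id);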
 +
  void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                          int loopback_pkt, u16 vlan_id)
  {
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        struct qlcnic_filter *fil, *tmp_fil;
 -      struct hlist_node *n;
        struct hlist_head *head;
        unsigned long time;
        u64 src_addr = 0;
 -      u8 hindex, found = 0, op;
 +      u8 hindex, op;
        int ret;
  
        memcpy(&src_addr, phdr->h_source, ETH_ALEN);
 +      hindex = qlcnic_mac_hash(src_addr) &
 +               (adapter->fhash.fbucket_size - 1);
  
        if (loopback_pkt) {
                if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
                        return;
  
 -              hindex = qlcnic_mac_hash(src_addr) &
 -                       (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);
  
 -              hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 -                      if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 -                          tmp_fil->vlan_id == vlan_id) {
 -                              time = tmp_fil->ftime;
 -                              if (jiffies > (QLCNIC_READD_AGE * HZ + time))
 -                                      tmp_fil->ftime = jiffies;
 -                              return;
 -                      }
 +              tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
 +              if (tmp_fil) {
 +                      time = tmp_fil->ftime;
 +                      if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
 +                              tmp_fil->ftime = jiffies;
 +                      return;
                }
  
                fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
                adapter->rx_fhash.fnum++;
                spin_unlock(&adapter->rx_mac_learn_lock);
        } else {
 -              hindex = qlcnic_mac_hash(src_addr) &
 -                       (adapter->fhash.fbucket_size - 1);
 -              head = &(adapter->rx_fhash.fhead[hindex]);
 -              spin_lock(&adapter->rx_mac_learn_lock);
 -              hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
 -                      if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 -                          tmp_fil->vlan_id == vlan_id) {
 -                              found = 1;
 -                              break;
 -                      }
 -              }
 +              head = &adapter->fhash.fhead[hindex];
  
 -              if (!found) {
 -                      spin_unlock(&adapter->rx_mac_learn_lock);
 -                      return;
 -              }
 +              spin_lock(&adapter->mac_learn_lock);
  
 -              op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
 -              ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
 -                                              vlan_id, op);
 -              if (!ret) {
 +              tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
 +              if (tmp_fil) {
                        op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
                        ret = qlcnic_sre_macaddr_change(adapter,
                                                        (u8 *)&src_addr,
                                                        vlan_id, op);
                        if (!ret) {
 -                              hlist_del(&(tmp_fil->fnode));
 -                              adapter->rx_fhash.fnum--;
 +                              hlist_del(&tmp_fil->fnode);
 +                              adapter->fhash.fnum--;
                        }
 +
 +                      spin_unlock(&adapter->mac_learn_lock);
 +
 +                      return;
                }
 +
 +              spin_unlock(&adapter->mac_learn_lock);
 +
 +              head = &adapter->rx_fhash.fhead[hindex];
 +
 +              spin_lock(&adapter->rx_mac_learn_lock);
 +
 +              tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
 +              if (tmp_fil)
 +                      qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
 +                                                vlan_id);
 +
                spin_unlock(&adapter->rx_mac_learn_lock);
        }
  }
@@@ -309,7 -262,7 +309,7 @@@ void qlcnic_82xx_change_filter(struct q
  
        mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
        mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
 -      memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
 +      memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
  
        vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
        vlan_req->vlan_id = cpu_to_le16(vlan_id);
@@@ -371,14 -324,14 +371,14 @@@ static void qlcnic_send_filter(struct q
  }
  
  static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
 -                       struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
 +                       struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
 +                       struct qlcnic_host_tx_ring *tx_ring)
  {
        u8 l4proto, opcode = 0, hdr_len = 0;
        u16 flags = 0, vlan_tci = 0;
        int copied, offset, copy_len, size;
        struct cmd_desc_type0 *hwdesc;
        struct vlan_ethhdr *vh;
 -      struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u16 protocol = ntohs(skb->protocol);
        u32 producer = tx_ring->producer;
  
@@@ -561,7 -514,7 +561,7 @@@ static inline void qlcnic_clear_cmddesc
  netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 -      struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 +      struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_cmd_buffer *pbuf;
        struct qlcnic_skb_frag *buffrag;
        struct cmd_desc_type0 *hwdesc, *first_desc;
        int i, k, frag_count, delta = 0;
        u32 producer, num_txd;
  
 -      num_txd = tx_ring->num_desc;
 -
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 -              netif_stop_queue(netdev);
 +              netif_tx_stop_all_queues(netdev);
                return NETDEV_TX_BUSY;
        }
  
                        goto drop_packet;
        }
  
 +      if (qlcnic_check_multi_tx(adapter))
 +              tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
 +      else
 +              tx_ring = &adapter->tx_ring[0];
 +      num_txd = tx_ring->num_desc;
 +
        frag_count = skb_shinfo(skb)->nr_frags + 1;
 +
        /* 14 frags supported for normal packet and
         * 32 frags supported for TSO packet
         */
        }
  
        if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
 -              netif_stop_queue(netdev);
 +              netif_tx_stop_queue(tx_ring->txq);
                if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
 -                      netif_start_queue(netdev);
 +                      netif_tx_start_queue(tx_ring->txq);
                } else {
                        adapter->stats.xmit_off++;
 +                      tx_ring->xmit_off++;
                        return NETDEV_TX_BUSY;
                }
        }
        tx_ring->producer = get_next_index(producer, num_txd);
        smp_mb();
  
 -      if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
 +      if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
                goto unwind_buff;
  
        if (adapter->drv_mac_learn)
  
        adapter->stats.txbytes += skb->len;
        adapter->stats.xmitcalled++;
 +      tx_ring->xmit_called++;
  
        qlcnic_update_cmd_producer(tx_ring);
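
With multiple TX rings, the xmit path above selects a ring from the queue index the network core already assigned to the skb, and the per-queue netif_tx_* helpers then stop and wake only that ring instead of the whole device. The selection and per-queue flow control in isolation:

        if (qlcnic_check_multi_tx(adapter))
                tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
        else
                tx_ring = &adapter->tx_ring[0];       /* legacy single-queue path */

        if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)
                netif_tx_stop_queue(tx_ring->txq);    /* stops only this queue */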
  
@@@ -694,7 -640,7 +694,7 @@@ void qlcnic_advert_link_change(struct q
                adapter->ahw->linkup = 0;
                if (netif_running(netdev)) {
                        netif_carrier_off(netdev);
 -                      netif_stop_queue(netdev);
 +                      netif_tx_stop_all_queues(netdev);
                }
        } else if (!adapter->ahw->linkup && linkup) {
                netdev_info(netdev, "NIC Link is up\n");
@@@ -789,6 -735,9 +789,6 @@@ static int qlcnic_process_cmd_ring(stru
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_skb_frag *frag;
  
 -      if (!spin_trylock(&adapter->tx_clean_lock))
 -              return 1;
 -
        sw_consumer = tx_ring->sw_consumer;
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
  
                                frag->dma = 0ULL;
                        }
                        adapter->stats.xmitfinished++;
 +                      tx_ring->xmit_finished++;
                        dev_kfree_skb_any(buffer->skb);
                        buffer->skb = NULL;
                }
        if (count && netif_running(netdev)) {
                tx_ring->sw_consumer = sw_consumer;
                smp_mb();
 -              if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
 +              if (netif_tx_queue_stopped(tx_ring->txq) &&
 +                  netif_carrier_ok(netdev)) {
                        if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
 -                              netif_wake_queue(netdev);
 +                              netif_tx_wake_queue(tx_ring->txq);
                                adapter->stats.xmit_on++;
 +                              tx_ring->xmit_on++;
                        }
                }
                adapter->tx_timeo_cnt = 0;
         */
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
        done = (sw_consumer == hw_consumer);
 -      spin_unlock(&adapter->tx_clean_lock);
  
        return done;
  }
@@@ -853,40 -800,16 +853,40 @@@ static int qlcnic_poll(struct napi_stru
        int tx_complete, work_done;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter;
 +      struct qlcnic_host_tx_ring *tx_ring;
  
        sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
        adapter = sds_ring->adapter;
 -      tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
 +      tx_ring = sds_ring->tx_ring;
 +
 +      tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
                                              budget);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
        if ((work_done < budget) && tx_complete) {
                napi_complete(&sds_ring->napi);
 -              if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
 +              if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_int(sds_ring);
 +                      qlcnic_enable_tx_intr(adapter, tx_ring);
 +              }
 +      }
 +
 +      return work_done;
 +}
 +
 +static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
 +{
 +      struct qlcnic_host_tx_ring *tx_ring;
 +      struct qlcnic_adapter *adapter;
 +      int work_done;
 +
 +      tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
 +      adapter = tx_ring->adapter;
 +
 +      work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
 +      if (work_done) {
 +              napi_complete(&tx_ring->napi);
 +              if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
 +                      qlcnic_enable_tx_intr(adapter, tx_ring);
        }
  
        return work_done;
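
A subtlety worth flagging, grounded in qlcnic_process_cmd_ring() above: its return value is (sw_consumer == hw_consumer), i.e. "ring fully drained", not a count of reaped descriptors. So this poller completes NAPI and re-arms the TX interrupt only once nothing is left, and keeps polling otherwise:

/* work_done != 0  ->  drained: napi_complete() + re-enable TX irq */
/* work_done == 0  ->  still busy: remain in NAPI polling mode     */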
@@@ -996,23 -919,20 +996,23 @@@ static void qlcnic_handle_fw_message(in
                        break;
                case 1:
                        dev_info(dev, "loopback already in progress\n");
 -                      adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
 +                      adapter->ahw->diag_cnt = -EINPROGRESS;
                        break;
                case 2:
                        dev_info(dev, "loopback cable is not connected\n");
 -                      adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
 +                      adapter->ahw->diag_cnt = -ENODEV;
                        break;
                default:
                        dev_info(dev,
                                 "loopback configure request failed, err %x\n",
                                 ret);
 -                      adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
 +                      adapter->ahw->diag_cnt = -EIO;
                        break;
                }
                break;
 +      case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
 +              qlcnic_dcb_handle_aen(adapter, (void *)&msg);
 +              break;
        default:
                break;
        }
@@@ -1458,31 -1378,23 +1458,31 @@@ void qlcnic_82xx_process_rcv_ring_diag(
  int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev)
  {
 -      int ring, max_sds_rings;
 +      int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 +      struct qlcnic_host_tx_ring *tx_ring;
  
        if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
                return -ENOMEM;
  
 -      max_sds_rings = adapter->max_sds_rings;
 -
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
 -              if (ring == adapter->max_sds_rings - 1)
 -                      netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
 -                                     QLCNIC_NETDEV_WEIGHT / max_sds_rings);
 -              else
 +              if (qlcnic_check_multi_tx(adapter) &&
 +                  !adapter->ahw->diag_test &&
 +                  (adapter->max_drv_tx_rings > 1)) {
                        netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
 -                                     QLCNIC_NETDEV_WEIGHT*2);
 +                                     NAPI_POLL_WEIGHT);
 +              } else {
 +                      if (ring == (adapter->max_sds_rings - 1))
 +                              netif_napi_add(netdev, &sds_ring->napi,
 +                                             qlcnic_poll,
 +                                             NAPI_POLL_WEIGHT);
 +                      else
 +                              netif_napi_add(netdev, &sds_ring->napi,
 +                                             qlcnic_rx_poll,
 +                                             NAPI_POLL_WEIGHT);
 +              }
        }
  
        if (qlcnic_alloc_tx_rings(adapter, netdev)) {
                return -ENOMEM;
        }
  
 +      if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
 +              for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +                      tx_ring = &adapter->tx_ring[ring];
 +                      netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
 +                                     NAPI_POLL_WEIGHT);
 +              }
 +      }
 +
        return 0;
  }
  
@@@ -1506,7 -1410,6 +1506,7 @@@ void qlcnic_82xx_napi_del(struct qlcnic
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 +      struct qlcnic_host_tx_ring *tx_ring;
  
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
        }
  
        qlcnic_free_sds_rings(adapter->recv_ctx);
 +
 +      if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
 +              for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +                      tx_ring = &adapter->tx_ring[ring];
 +                      netif_napi_del(&tx_ring->napi);
 +              }
 +      }
 +
        qlcnic_free_tx_rings(adapter);
  }
  
@@@ -1529,7 -1424,6 +1529,7 @@@ void qlcnic_82xx_napi_enable(struct qlc
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 +      struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                napi_enable(&sds_ring->napi);
                qlcnic_enable_int(sds_ring);
        }
 +
 +      if (qlcnic_check_multi_tx(adapter) &&
 +          (adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !adapter->ahw->diag_test &&
 +          (adapter->max_drv_tx_rings > 1)) {
 +              for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +                      tx_ring = &adapter->tx_ring[ring];
 +                      napi_enable(&tx_ring->napi);
 +                      qlcnic_enable_tx_intr(adapter, tx_ring);
 +              }
 +      }
  }
  
  void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 +      struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                napi_synchronize(&sds_ring->napi);
                napi_disable(&sds_ring->napi);
        }
 +
 +      if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
 +          !adapter->ahw->diag_test &&
 +          qlcnic_check_multi_tx(adapter)) {
 +              for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +                      tx_ring = &adapter->tx_ring[ring];
 +                      qlcnic_disable_tx_int(adapter, tx_ring);
 +                      napi_synchronize(&tx_ring->napi);
 +                      napi_disable(&tx_ring->napi);
 +              }
 +      }
  }
  
  #define QLC_83XX_NORMAL_LB_PKT        (1ULL << 36)
@@@ -1781,7 -1652,7 +1781,7 @@@ static int qlcnic_83xx_process_rcv_ring
                        break;
                default:
                        dev_info(&adapter->pdev->dev,
-                                "Unkonwn opcode: 0x%x\n", opcode);
+                                "Unknown opcode: 0x%x\n", opcode);
                        goto skip;
                }
  
@@@ -1960,7 -1831,7 +1960,7 @@@ void qlcnic_83xx_napi_disable(struct ql
  int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev)
  {
 -      int ring, max_sds_rings, temp;
 +      int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
                return -ENOMEM;
  
 -      max_sds_rings = adapter->max_sds_rings;
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 -                      if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
 +                      if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
                                netif_napi_add(netdev, &sds_ring->napi,
                                               qlcnic_83xx_rx_poll,
 -                                             QLCNIC_NETDEV_WEIGHT * 2);
 -                      } else {
 -                              temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
 +                                             NAPI_POLL_WEIGHT);
 +                      else
                                netif_napi_add(netdev, &sds_ring->napi,
                                               qlcnic_83xx_msix_sriov_vf_poll,
 -                                             temp);
 -                      }
 +                                             NAPI_POLL_WEIGHT);
  
                } else {
                        netif_napi_add(netdev, &sds_ring->napi,
                                       qlcnic_83xx_poll,
 -                                     QLCNIC_NETDEV_WEIGHT / max_sds_rings);
 +                                     NAPI_POLL_WEIGHT);
                }
        }
  
                        tx_ring = &adapter->tx_ring[ring];
                        netif_napi_add(netdev, &tx_ring->napi,
                                       qlcnic_83xx_msix_tx_poll,
 -                                     QLCNIC_NETDEV_WEIGHT);
 +                                     NAPI_POLL_WEIGHT);
                }
        }
  
index b7a39305472b156c05b21d0af850fd56fc3945dd,6c1e34cd8ae533647c0ffcfaaa0094e9ce607509..975dc2d8e548c0942f58cac47550883528f6ebb8
@@@ -1309,9 -1309,23 +1309,9 @@@ static void sis900_timer(unsigned long 
        struct sis900_private *sis_priv = netdev_priv(net_dev);
        struct mii_phy *mii_phy = sis_priv->mii;
        static const int next_tick = 5*HZ;
 +      int speed = 0, duplex = 0;
        u16 status;
  
 -      if (!sis_priv->autong_complete){
 -              int uninitialized_var(speed), duplex = 0;
 -
 -              sis900_read_mode(net_dev, &speed, &duplex);
 -              if (duplex){
 -                      sis900_set_mode(sis_priv, speed, duplex);
 -                      sis630_set_eq(net_dev, sis_priv->chipset_rev);
 -                      netif_start_queue(net_dev);
 -              }
 -
 -              sis_priv->timer.expires = jiffies + HZ;
 -              add_timer(&sis_priv->timer);
 -              return;
 -      }
 -
        status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
        status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
  
                status = sis900_default_phy(net_dev);
                mii_phy = sis_priv->mii;
  
 -              if (status & MII_STAT_LINK){
 -                      sis900_check_mode(net_dev, mii_phy);
 -                      netif_carrier_on(net_dev);
 +              if (status & MII_STAT_LINK) {
 +                      WARN_ON(!(status & MII_STAT_AUTO_DONE));
 +
 +                      sis900_read_mode(net_dev, &speed, &duplex);
 +                      if (duplex) {
 +                              sis900_set_mode(sis_priv, speed, duplex);
 +                              sis630_set_eq(net_dev, sis_priv->chipset_rev);
 +                              netif_carrier_on(net_dev);
 +                      }
                }
        } else {
        /* Link ON -> OFF */
@@@ -1604,6 -1612,12 +1604,6 @@@ sis900_start_xmit(struct sk_buff *skb, 
        unsigned int  index_cur_tx, index_dirty_tx;
        unsigned int  count_dirty_tx;
  
 -      /* Don't transmit data before the complete of auto-negotiation */
 -      if(!sis_priv->autong_complete){
 -              netif_stop_queue(net_dev);
 -              return NETDEV_TX_BUSY;
 -      }
 -
        spin_lock_irqsave(&sis_priv->lock, flags);
  
        /* Calculate the next Tx descriptor entry. */
@@@ -1709,7 -1723,7 +1709,7 @@@ static irqreturn_t sis900_interrupt(in
  
        if(netif_msg_intr(sis_priv))
                printk(KERN_DEBUG "%s: exiting interrupt, "
-                      "interrupt status = 0x%#8.8x.\n",
+                      "interrupt status = %#8.8x\n",
                       net_dev->name, sr32(isr));
  
        spin_unlock (&sis_priv->lock);
index c17b74c31398fdd8b6f44e6b76aa2897c1c03c22,39b3ffbc53b0da3d251110dee92fa49bcbefb807..76a3c177e100ab28660ac9433fe5c26edc5a79e8
@@@ -73,6 -73,7 +73,6 @@@
  #include "iwl-prph.h"
  
  /* A TimeUnit is 1024 microseconds */
 -#define TU_TO_JIFFIES(_tu)    (usecs_to_jiffies((_tu) * 1024))
  #define MSEC_TO_TU(_msec)     (_msec*1000/1024)
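
MSEC_TO_TU does the millisecond-to-TimeUnit conversion with truncating integer arithmetic. Two worked values for illustration:

/* MSEC_TO_TU(_msec) = _msec * 1000 / 1024                        */
/* MSEC_TO_TU(500) = 500000 / 1024 = 488 TU (about 499.7 ms)      */
/* MSEC_TO_TU(1)   =   1000 / 1024 = 0 TU (sub-TU spans truncate) */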
  
  /*
@@@ -137,20 -138,6 +137,20 @@@ static void iwl_mvm_roc_finished(struc
        schedule_work(&mvm->roc_done_wk);
  }
  
 +static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
 +                                      struct ieee80211_vif *vif,
 +                                      const char *errmsg)
 +{
 +      if (vif->type != NL80211_IFTYPE_STATION)
 +              return false;
 +      if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
 +              return false;
 +      if (errmsg)
 +              IWL_ERR(mvm, "%s\n", errmsg);
 +      ieee80211_connection_loss(vif);
 +      return true;
 +}
 +
  /*
   * Handles a FW notification for an event that is known to the driver.
   *
@@@ -176,15 -163,10 +176,15 @@@ static void iwl_mvm_te_handle_notif(str
         * P2P Device discoverability, while there are other higher priority
         * events in the system).
         */
 -      WARN_ONCE(!le32_to_cpu(notif->status),
 -                "Failed to schedule time event\n");
 +      if (WARN_ONCE(!le32_to_cpu(notif->status),
 +                    "Failed to schedule time event\n")) {
 +              if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
 +                      iwl_mvm_te_clear_data(mvm, te_data);
 +                      return;
 +              }
 +      }
  
 -      if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
 +      if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
                IWL_DEBUG_TE(mvm,
                             "TE ended - current time %lu, estimated end %lu\n",
                             jiffies, te_data->end_jiffies);
                 * By now, we should have finished association
                 * and know the dtim period.
                 */
 -              if (te_data->vif->type == NL80211_IFTYPE_STATION &&
 -                  (!te_data->vif->bss_conf.assoc ||
 -                   !te_data->vif->bss_conf.dtim_period)) {
 -                      IWL_ERR(mvm,
 -                              "No association and the time event is over already...\n");
 -                      ieee80211_connection_loss(te_data->vif);
 -              }
 -
 +              iwl_mvm_te_check_disconnect(mvm, te_data->vif,
-                       "No assocation and the time event is over already...");
++                      "No association and the time event is over already...");
                iwl_mvm_te_clear_data(mvm, te_data);
 -      } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
 +      } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
 -              te_data->end_jiffies = jiffies +
 -                      TU_TO_JIFFIES(te_data->duration);
 +              te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
  
                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
@@@ -268,67 -257,10 +268,67 @@@ static bool iwl_mvm_time_event_response
        return true;
  }
  
 +/* used to convert from time event API v2 to v1 */
 +#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
 +                           TE_V2_EVENT_SOCIOPATHIC)
 +static inline u16 te_v2_get_notify(__le16 policy)
 +{
 +      return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
 +}
 +
 +static inline u16 te_v2_get_dep_policy(__le16 policy)
 +{
 +      return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
 +              TE_V2_PLACEMENT_POS;
 +}
 +
 +static inline u16 te_v2_get_absence(__le16 policy)
 +{
 +      return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
 +}
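
The three helpers above unpack the packed little-endian v2 policy word so that iwl_mvm_te_v2_to_v1() below can widen each piece into its own 32-bit v1 field. Summarized (the *_MSK and *_POS constants come from the firmware API headers and are not visible in this diff):

/* notify  = policy & TE_V2_NOTIF_MSK                (kept in place)  */
/* dep     = (policy & TE_V2_DEP_POLICY_MSK) >> TE_V2_PLACEMENT_POS  */
/* absence = (policy & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS           */
/* v1 then stores e.g. cmd_v1->is_present = cpu_to_le32(!absence);   */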
 +
 +static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
 +                              struct iwl_time_event_cmd_v1 *cmd_v1)
 +{
 +      cmd_v1->id_and_color = cmd_v2->id_and_color;
 +      cmd_v1->action = cmd_v2->action;
 +      cmd_v1->id = cmd_v2->id;
 +      cmd_v1->apply_time = cmd_v2->apply_time;
 +      cmd_v1->max_delay = cmd_v2->max_delay;
 +      cmd_v1->depends_on = cmd_v2->depends_on;
 +      cmd_v1->interval = cmd_v2->interval;
 +      cmd_v1->duration = cmd_v2->duration;
 +      if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
 +              cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
 +      else
 +              cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
 +      cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
 +      cmd_v1->interval_reciprocal = 0; /* unused */
 +
 +      cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
 +      cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
 +      cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
 +}
 +
 +static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
 +                                     const struct iwl_time_event_cmd_v2 *cmd)
 +{
 +      struct iwl_time_event_cmd_v1 cmd_v1;
 +
 +      if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
 +              return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
 +                                          sizeof(*cmd), cmd);
 +
 +      iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
 +      return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
 +                                  sizeof(cmd_v1), &cmd_v1);
 +}
 +
  static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
 -                                     struct iwl_time_event_cmd *te_cmd)
 +                                     struct iwl_time_event_cmd_v2 *te_cmd)
  {
        static const u8 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);
  
 -      ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
 -                                 sizeof(*te_cmd), te_cmd);
 +      ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@@ -391,12 -324,13 +391,12 @@@ void iwl_mvm_protect_session(struct iwl
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
 -      struct iwl_time_event_cmd time_cmd = {};
 +      struct iwl_time_event_cmd_v2 time_cmd = {};
  
        lockdep_assert_held(&mvm->mutex);
  
        if (te_data->running &&
 -          time_after(te_data->end_jiffies,
 -                     jiffies + TU_TO_JIFFIES(min_duration))) {
 +          time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
                IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                return;
        time_cmd.apply_time =
                cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
  
 -      time_cmd.dep_policy = TE_INDEPENDENT;
 -      time_cmd.is_present = cpu_to_le32(1);
 -      time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
 +      time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(500);
        /* TODO: why do we need to set interval = bi if it is not periodic? */
        time_cmd.interval = cpu_to_le32(1);
 -      time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
        time_cmd.duration = cpu_to_le32(duration);
 -      time_cmd.repeat = cpu_to_le32(1);
 -      time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
 -                                    TE_NOTIF_HOST_EVENT_END);
 +      time_cmd.repeat = 1;
 +      time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
 +                                    TE_V2_NOTIF_HOST_EVENT_END);
  
        iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
  }
@@@ -446,7 -383,7 +446,7 @@@ void iwl_mvm_remove_time_event(struct i
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
  {
 -      struct iwl_time_event_cmd time_cmd = {};
 +      struct iwl_time_event_cmd_v2 time_cmd = {};
        u32 id, uid;
        int ret;
  
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
  
        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
 -      ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
 -                                 sizeof(time_cmd), &time_cmd);
 +      ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
        if (WARN_ON(ret))
                return;
  }
@@@ -503,7 -441,7 +503,7 @@@ int iwl_mvm_start_p2p_roc(struct iwl_mv
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
 -      struct iwl_time_event_cmd time_cmd = {};
 +      struct iwl_time_event_cmd_v2 time_cmd = {};
  
        lockdep_assert_held(&mvm->mutex);
        if (te_data->running) {
        }
  
        time_cmd.apply_time = cpu_to_le32(0);
 -      time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
 -      time_cmd.is_present = cpu_to_le32(1);
        time_cmd.interval = cpu_to_le32(1);
  
        /*
         * scheduled. To improve the chances of it being scheduled, allow them
         * to be fragmented, and in addition allow them to be delayed.
         */
 -      time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
 +      time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
        time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
        time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
 -      time_cmd.repeat = cpu_to_le32(1);
 -      time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
 -                                    TE_NOTIF_HOST_EVENT_END);
 +      time_cmd.repeat = 1;
 +      time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
 +                                    TE_V2_NOTIF_HOST_EVENT_END);
  
        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
  }
diff --combined drivers/scsi/hpsa.c
index b766f5aea584811a6b1e071f26691126a014c88c,4cfa3af95b528a5195bc8b4dde5374a8f5064dff..fac8cf5832ddce5bf0fe182570f17e8dbdb6899b
@@@ -583,7 -583,7 +583,7 @@@ static void set_performant_mode(struct 
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (likely(h->msix_vector))
                        c->Header.ReplyQueue =
 -                              smp_processor_id() % h->nreply_queues;
 +                              raw_smp_processor_id() % h->nreply_queues;
        }
  }
  
@@@ -1054,7 -1054,7 +1054,7 @@@ free_and_out
  }
  
  /*
-  * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t *
+  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
   * Assumes h->devlock is held.
   */
  static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
@@@ -1205,8 -1205,8 +1205,8 @@@ static void complete_scsi_command(struc
        scsi_set_resid(cmd, ei->ResidualCnt);
  
        if (ei->CommandStatus == 0) {
 -              cmd->scsi_done(cmd);
                cmd_free(h, cp);
 +              cmd->scsi_done(cmd);
                return;
        }
  
                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
                                cp, ei->CommandStatus);
        }
 -      cmd->scsi_done(cmd);
        cmd_free(h, cp);
 +      cmd->scsi_done(cmd);
  }
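
The two complete_scsi_command() hunks swap cmd_free() ahead of scsi_done(): the command slot returns to the driver's pool before the midlayer completion callback runs, presumably so that any command issued from within the completion path can always find a free slot. The ordering rule in isolation, as a runnable sketch with a one-slot pool (all names invented):

    #include <stdbool.h>
    #include <stdio.h>

    static bool slot_busy;                /* one-entry stand-in for the cmd pool */

    static bool cmd_alloc(void)
    {
        if (slot_busy)
            return false;
        slot_busy = true;
        return true;
    }

    static void cmd_free(void)
    {
        slot_busy = false;
    }

    /* Completion callback that immediately issues a follow-up command, as the
     * SCSI midlayer can do from within scsi_done(). */
    static void done(void)
    {
        printf("follow-up alloc %s\n", cmd_alloc() ? "succeeded" : "FAILED");
    }

    int main(void)
    {
        cmd_alloc();     /* the command currently completing occupies the slot */
        cmd_free();      /* new order: recycle the slot first ... */
        done();          /* ... so the completion path can allocate again */
        return 0;
    }
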
  
  static void hpsa_pci_unmap(struct pci_dev *pdev,
@@@ -2721,6 -2721,7 +2721,6 @@@ static struct CommandList *cmd_alloc(st
        } while (test_and_set_bit
                 (i & (BITS_PER_LONG - 1),
                  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
 -      h->nr_allocs++;
        spin_unlock_irqrestore(&h->lock, flags);
  
        c = h->cmd_pool + i;
@@@ -2792,6 -2793,7 +2792,6 @@@ static void cmd_free(struct ctlr_info *
        spin_lock_irqsave(&h->lock, flags);
        clear_bit(i & (BITS_PER_LONG - 1),
                  h->cmd_pool_bits + (i / BITS_PER_LONG));
 -      h->nr_frees++;
        spin_unlock_irqrestore(&h->lock, flags);
  }
  
index 22f42f866f75e52e5512654738dbdd86e7c9fcd1,f4360c5ea6a9f2f7a6782180d529c5d04bcce729..16498e030c70392e2b5986f498d2c1b4ae4142e0
@@@ -674,6 -674,9 +674,6 @@@ lpfc_do_offline(struct lpfc_hba *phba, 
        int i;
        int rc;
  
 -      if (phba->pport->fc_flag & FC_OFFLINE_MODE)
 -              return 0;
 -
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl,
                              LPFC_EVT_OFFLINE_PREP);
@@@ -741,15 -744,14 +741,15 @@@ lpfc_selective_reset(struct lpfc_hba *p
        int status = 0;
        int rc;
  
 -      if ((!phba->cfg_enable_hba_reset) ||
 -          (phba->pport->fc_flag & FC_OFFLINE_MODE))
 +      if (!phba->cfg_enable_hba_reset)
                return -EACCES;
  
 -      status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 +      if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
 +              status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
  
 -      if (status != 0)
 -              return status;
 +              if (status != 0)
 +                      return status;
 +      }
  
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@@ -816,7 -818,7 +816,7 @@@ lpfc_issue_reset(struct device *dev, st
   * the readiness after performing a firmware reset.
   *
   * Returns:
-  * zero for success, -EPERM when port does not have privilage to perform the
+  * zero for success, -EPERM when port does not have privilege to perform the
   * reset, -EIO when port timeout from recovering from the reset.
   *
   * Note:
@@@ -833,7 -835,7 +833,7 @@@ lpfc_sli4_pdev_status_reg_wait(struct l
        lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                   &portstat_reg.word0);
  
-       /* verify if privilaged for the request operation */
+       /* verify if privileged for the request operation */
        if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
            !bf_get(lpfc_sliport_status_err, &portstat_reg))
                return -EPERM;
@@@ -925,9 -927,9 +925,9 @@@ lpfc_sli4_pdev_reg_request(struct lpfc_
        rc = lpfc_sli4_pdev_status_reg_wait(phba);
  
        if (rc == -EPERM) {
-               /* no privilage for reset */
+               /* no privilege for reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "3150 No privilage to perform the requested "
+                               "3150 No privilege to perform the requested "
                                "access: x%x\n", reg_val);
        } else if (rc == -EIO) {
                /* reset failed, there is nothing more we can do */
@@@ -2589,12 -2591,9 +2589,12 @@@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1
  
  /*
  # lun_queue_depth:  This parameter is used to limit the number of outstanding
 -# commands per FCP LUN. Value range is [1,128]. Default value is 30.
 +# commands per FCP LUN. Value range is [1,512]. Default value is 30.
 +# If this parameter value is greater than 1/8th the maximum number of exchanges
 +# supported by the HBA port, then the lun queue depth will be reduced to
 +# 1/8th the maximum number of exchanges.
  */
 -LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
 +LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
                  "Max number of FCP commands we can queue to a specific LUN");
  
  /*
  # commands per target port. Value range is [10,65535]. Default value is 65535.
  */
  LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
 -      "Max number of FCP commands we can queue to a specific target port");
 +                "Max number of FCP commands we can queue to a specific target port");
  
  /*
  # hba_queue_depth:  This parameter is used to limit the number of outstanding
@@@ -3949,14 -3948,6 +3949,14 @@@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3
  LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
                   "Use ADISC on rediscovery to authenticate FCP devices");
  
 +/*
 +# lpfc_first_burst_size: First burst size to use on the NPorts
 +# that support first burst.
 +# Value range is [0,65536]. Default value is 0.
 +*/
 +LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
 +                 "First burst size for Targets that support first burst");
 +
  /*
  # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
  # depth. Default value is 0. When the value of this parameter is zero the
@@@ -4120,6 -4111,25 +4120,6 @@@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255
  LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
            "MSI-X (2), if possible");
  
 -/*
 -# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
 -# This parameter is ignored and will eventually be depricated
 -#
 -# Value range is [1,7]. Default value is 4.
 -*/
 -LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
 -          LPFC_FCP_IO_CHAN_MAX,
 -          "Set the number of fast-path FCP work queues, if possible");
 -
 -/*
 -# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
 -#
 -# Value range is [1,7]. Default value is 4.
 -*/
 -LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
 -          LPFC_FCP_IO_CHAN_MAX,
 -          "Set the number of fast-path FCP event queues, if possible");
 -
  /*
  # lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
  #
@@@ -4266,7 -4276,6 +4266,7 @@@ struct device_attribute *lpfc_hba_attrs
        &dev_attr_lpfc_devloss_tmo,
        &dev_attr_lpfc_fcp_class,
        &dev_attr_lpfc_use_adisc,
 +      &dev_attr_lpfc_first_burst_size,
        &dev_attr_lpfc_ack0,
        &dev_attr_lpfc_topology,
        &dev_attr_lpfc_scan_down,
        &dev_attr_lpfc_use_msi,
        &dev_attr_lpfc_fcp_imax,
        &dev_attr_lpfc_fcp_cpu_map,
 -      &dev_attr_lpfc_fcp_wq_count,
 -      &dev_attr_lpfc_fcp_eq_count,
        &dev_attr_lpfc_fcp_io_channel,
        &dev_attr_lpfc_enable_bg,
        &dev_attr_lpfc_soft_wwnn,
@@@ -4341,7 -4352,6 +4341,7 @@@ struct device_attribute *lpfc_vport_att
        &dev_attr_lpfc_restrict_login,
        &dev_attr_lpfc_fcp_class,
        &dev_attr_lpfc_use_adisc,
 +      &dev_attr_lpfc_first_burst_size,
        &dev_attr_lpfc_fdmi_on,
        &dev_attr_lpfc_max_luns,
        &dev_attr_nport_evt_cnt,
@@@ -5280,6 -5290,8 +5280,6 @@@ lpfc_get_cfgparam(struct lpfc_hba *phba
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
        lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
 -      lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
 -      lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
        lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
        lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@@ -5319,7 -5331,6 +5319,7 @@@ lpfc_get_vport_cfgparam(struct lpfc_vpo
        lpfc_restrict_login_init(vport, lpfc_restrict_login);
        lpfc_fcp_class_init(vport, lpfc_fcp_class);
        lpfc_use_adisc_init(vport, lpfc_use_adisc);
 +      lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
        lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
        lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
        lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
index bc270639c1c3a6c1f3b83c83be48ae99acaabcb5,fcbedd6053c4b9f026fffcbade61b77a60ccb0b7..79c13c3263f15a7afbc3b11c0bb5aca4345a306b
@@@ -2498,7 -2498,7 +2498,7 @@@ static int lpfcdiag_loop_get_xri(struc
        struct lpfc_sli_ct_request *ctreq = NULL;
        int ret_val = 0;
        int time_left;
 -      int iocb_stat = 0;
 +      int iocb_stat = IOCB_SUCCESS;
        unsigned long flags;
  
        *txxri = 0;
  
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        cmdiocbq->vport = phba->pport;
 +      cmdiocbq->iocb_cmpl = NULL;
  
        iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
                                rspiocbq,
@@@ -2628,7 -2627,7 +2628,7 @@@ err_get_xri_exit
   * @phba: Pointer to HBA context object
   *
   * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
-  * retruns the pointer to the buffer.
+  * returns the pointer to the buffer.
   **/
  static struct lpfc_dmabuf *
  lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
@@@ -2964,7 -2963,7 +2964,7 @@@ lpfc_bsg_diag_loopback_run(struct fc_bs
        uint8_t *ptr = NULL, *rx_databuf = NULL;
        int rc = 0;
        int time_left;
 -      int iocb_stat;
 +      int iocb_stat = IOCB_SUCCESS;
        unsigned long flags;
        void *dataout = NULL;
        uint32_t total_mem;
        }
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        cmdiocbq->vport = phba->pport;
 +      cmdiocbq->iocb_cmpl = NULL;
        iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
                                             rspiocbq, (phba->fc_ratov * 2) +
                                             LPFC_DRVR_TIMEOUT);
@@@ -3211,7 -3209,7 +3211,7 @@@ err_loopback_test_exit
        lpfc_bsg_event_unref(evt); /* delete */
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  
 -      if (cmdiocbq != NULL)
 +      if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
                lpfc_sli_release_iocbq(phba, cmdiocbq);
  
        if (rspiocbq != NULL)
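
The guard on lpfc_sli_release_iocbq() above, together with the new iocb_cmpl = NULL assignments before each lpfc_sli_issue_iocb_wait() call and iocb_stat starting at IOCB_SUCCESS, encodes an ownership rule: when the synchronous wait ends in IOCB_TIMEDOUT, the iocb may still be completed later by the firmware, so handing it back to the pool here could free an object another path still references. The rule in generic, runnable form (names invented):

    #include <stdbool.h>
    #include <stdio.h>

    enum wait_status { WAIT_DONE, WAIT_TIMEDOUT };

    struct req { bool in_pool; };

    static void req_pool_put(struct req *r) { r->in_pool = true; }

    /* Free a request on the spot only if we still own it; after a timeout the
     * "hardware" may complete it later, so the late completion path must be
     * the one to release it. */
    static void finish(struct req *r, enum wait_status st)
    {
        if (st != WAIT_TIMEDOUT)
            req_pool_put(r);
    }

    int main(void)
    {
        struct req r = { .in_pool = false };

        finish(&r, WAIT_TIMEDOUT);
        printf("after timeout: %s\n",
               r.in_pool ? "recycled (unsafe!)" : "left to completion path");
        return 0;
    }
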
index b989add77ec3977f6c379f28ddb3d89be757bcc9,5afdc3a25014c52ed8f0a449c7dfe4e3dc6c2c9d..aa57bf0af574e872ab6e00dae36f7fe4df1a2199
@@@ -125,7 -125,7 +125,7 @@@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc
        uint32_t len;
        uint32_t oper;
  
 -      if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
 +      if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
                ret = -EINVAL;
                goto exit_fcp_prio_cfg;
        }
@@@ -559,7 -559,7 +559,7 @@@ qla81xx_reset_loopback_mode(scsi_qla_ho
        uint16_t new_config[4];
        struct qla_hw_data *ha = vha->hw;
  
 -      if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 +      if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
                goto done_reset_internal;
  
        memset(new_config, 0 , sizeof(new_config));
@@@ -627,10 -627,9 +627,10 @@@ qla81xx_set_loopback_mode(scsi_qla_host
  {
        int ret = 0;
        int rval = 0;
 +      unsigned long rem_tmo = 0, current_tmo = 0;
        struct qla_hw_data *ha = vha->hw;
  
 -      if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 +      if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
                goto done_set_internal;
  
        if (mode == INTERNAL_LOOPBACK)
        }
  
        /* Wait for DCBX complete event */
 -      if (!wait_for_completion_timeout(&ha->dcbx_comp,
 -          (DCBX_COMP_TIMEOUT * HZ))) {
 +      current_tmo = DCBX_COMP_TIMEOUT * HZ;
 +      while (1) {
 +              rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
 +                  current_tmo);
 +              if (!ha->idc_extend_tmo || rem_tmo) {
 +                      ha->idc_extend_tmo = 0;
 +                      break;
 +              }
 +              current_tmo = ha->idc_extend_tmo * HZ;
 +              ha->idc_extend_tmo = 0;
 +      }
 +
 +      if (!rem_tmo) {
                ql_dbg(ql_dbg_user, vha, 0x7022,
                    "DCBX completion not received.\n");
                ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
        }
  
        ha->notify_dcbx_comp = 0;
 +      ha->idc_extend_tmo = 0;
  
  done_set_internal:
        return rval;
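
The loop replacing the single wait_for_completion_timeout() call implements an extendable timeout: if the wait times out but the firmware has meanwhile posted a time-extension request (ha->idc_extend_tmo), the wait restarts with the extended budget; only a timeout with no pending extension counts as failure. A runnable sketch of the same control flow, with the kernel wait replaced by a stub that times out twice while requesting extensions (all values invented):

    #include <stdio.h>

    static unsigned long idc_extend_tmo;   /* set asynchronously by "firmware" */
    static int timeouts_left = 2;          /* stub: event arrives on the 3rd wait */

    /* Stand-in for wait_for_completion_timeout(): returns time remaining (>0)
     * if the event arrived within tmo, 0 on timeout.  This stub times out
     * twice, each time posting a 20-unit extension, then succeeds. */
    static unsigned long wait_stub(unsigned long tmo)
    {
        if (timeouts_left-- > 0) {
            idc_extend_tmo = 20;
            return 0;
        }
        return tmo / 2;
    }

    int main(void)
    {
        unsigned long rem_tmo = 0, current_tmo = 20;

        for (;;) {
            rem_tmo = wait_stub(current_tmo);
            if (!idc_extend_tmo || rem_tmo) {   /* done, or a real timeout */
                idc_extend_tmo = 0;
                break;
            }
            current_tmo = idc_extend_tmo;       /* firmware asked for more time */
            idc_extend_tmo = 0;
        }

        if (rem_tmo)
            printf("completed with %lu units to spare\n", rem_tmo);
        else
            printf("hard timeout\n");
        return 0;
    }
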
@@@ -786,7 -773,7 +786,7 @@@ qla2x00_process_loopback(struct fc_bsg_
  
        if (atomic_read(&vha->loop_state) == LOOP_READY &&
            (ha->current_topology == ISP_CFG_F ||
 -          ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
 +          ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
            le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
            && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
                elreq.options == EXTERNAL_LOOPBACK) {
                command_sent = INT_DEF_LB_ECHO_CMD;
                rval = qla2x00_echo_test(vha, &elreq, response);
        } else {
 -              if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
 +              if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
                        memset(config, 0, sizeof(config));
                        memset(new_config, 0, sizeof(new_config));
  
                            "elreq.options=%04x\n", elreq.options);
  
                        if (elreq.options == EXTERNAL_LOOPBACK)
 -                              if (IS_QLA8031(ha))
 +                              if (IS_QLA8031(ha) || IS_QLA8044(ha))
                                        rval = qla81xx_set_loopback_mode(vha,
                                            config, new_config, elreq.options);
                                else
@@@ -1279,7 -1266,6 +1279,7 @@@ qla24xx_iidma(struct fc_bsg_job *bsg_jo
        int rval = 0;
        struct qla_port_param *port_param = NULL;
        fc_port_t *fcport = NULL;
 +      int found = 0;
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        uint8_t *rsp_ptr = NULL;
  
                if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
                        fcport->port_name, sizeof(fcport->port_name)))
                        continue;
 +
 +              found = 1;
                break;
        }
  
 -      if (!fcport) {
 +      if (!found) {
                ql_log(ql_log_warn, vha, 0x7049,
                    "Failed to find port.\n");
                return -EINVAL;
  
        if (rval) {
                ql_log(ql_log_warn, vha, 0x704c,
 -                  "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
 -                  "%04x %x %04x %04x.\n", fcport->port_name[0],
 -                  fcport->port_name[1], fcport->port_name[2],
 -                  fcport->port_name[3], fcport->port_name[4],
 -                  fcport->port_name[5], fcport->port_name[6],
 -                  fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
 +                  "iIDMA cmd failed for %8phN -- "
 +                  "%04x %x %04x %04x.\n", fcport->port_name,
 +                  rval, fcport->fp_speed, mb[0], mb[1]);
                rval = (DID_ERROR << 16);
        } else {
                if (!port_param->mode) {
@@@ -1895,7 -1882,7 +1895,7 @@@ done
        bsg_job->reply->reply_payload_rcv_len = 0;
        bsg_job->reply->result = (DID_OK) << 16;
        bsg_job->job_done(bsg_job);
-       /* Always retrun success, vendor rsp carries correct status */
+       /* Always return success, vendor rsp carries correct status */
        return 0;
  }
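
Two notes on the qla24xx_iidma() hunk above. First, the new found flag fixes a classic list_for_each_entry() pitfall: after a full traversal the cursor is not NULL - it points into the list head - so the old if (!fcport) test could never detect a miss and the code would go on to use a bogus pointer. Second, the log message now uses %8phN, the printk extension that dumps 8 bytes as contiguous hex, replacing eight hand-written %02x conversions. The corrected lookup idiom, sketched in kernel style (match() is a hypothetical predicate standing in for the wwpn comparison):

    struct fc_port *fcport;
    bool found = false;

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (match(fcport)) {            /* hypothetical predicate */
            found = true;
            break;
        }
    }
    if (!found)                         /* NOT "if (!fcport)" - never NULL here */
        return -EINVAL;
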
  
index 2482975d72b256cd2426e10bc53d2f5acff895aa,ab4be107cda139db1a11fbaad4942ac930efc9da..62ee7131b20420a7d1d363682dde7192ed152758
@@@ -294,7 -294,7 +294,7 @@@ premature_exit
   * Context:
   *    Kernel context.
   */
 -static int
 +int
  qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
  {
        int rval;
@@@ -775,29 -775,6 +775,29 @@@ qlafx00_lun_reset(fc_port_t *fcport, un
        return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
  }
  
 +int
 +qlafx00_loop_reset(scsi_qla_host_t *vha)
 +{
 +      int ret;
 +      struct fc_port *fcport;
 +      struct qla_hw_data *ha = vha->hw;
 +
 +      if (ql2xtargetreset) {
 +              list_for_each_entry(fcport, &vha->vp_fcports, list) {
 +                      if (fcport->port_type != FCT_TARGET)
 +                              continue;
 +
 +                      ret = ha->isp_ops->target_reset(fcport, 0, 0);
 +                      if (ret != QLA_SUCCESS) {
 +                              ql_dbg(ql_dbg_taskm, vha, 0x803d,
 +                                  "Bus Reset failed: Reset=%d "
 +                                  "d_id=%x.\n", ret, fcport->d_id.b24);
 +                      }
 +              }
 +      }
 +      return QLA_SUCCESS;
 +}
 +
  int
  qlafx00_iospace_config(struct qla_hw_data *ha)
  {
@@@ -941,23 -918,12 +941,23 @@@ qlafx00_init_fw_ready(scsi_qla_host_t *
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
        uint32_t aenmbx, aenmbx7 = 0;
 +      uint32_t pseudo_aen;
        uint32_t state[5];
        bool done = false;
  
        /* 30 seconds wait - Adjust if required */
        wait_time = 30;
  
 +      pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
 +      if (pseudo_aen == 1) {
 +              aenmbx7 = RD_REG_DWORD(&reg->initval7);
 +              ha->mbx_intr_code = MSW(aenmbx7);
 +              ha->rqstq_intr_code = LSW(aenmbx7);
 +              rval = qlafx00_driver_shutdown(vha, 10);
 +              if (rval != QLA_SUCCESS)
 +                      qlafx00_soft_reset(vha);
 +      }
 +
        /* wait time before firmware ready */
        wtime = jiffies + (wait_time * HZ);
        do {
@@@ -1383,22 -1349,21 +1383,22 @@@ qlafx00_configure_devices(scsi_qla_host
  }
  
  static void
 -qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
 +qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
  {
        struct qla_hw_data *ha = vha->hw;
        fc_port_t *fcport;
  
        vha->flags.online = 0;
 -      ha->flags.chip_reset_done = 0;
        ha->mr.fw_hbt_en = 0;
 -      clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 -      vha->qla_stats.total_isp_aborts++;
 -
 -      ql_log(ql_log_info, vha, 0x013f,
 -          "Performing ISP error recovery - ha = %p.\n", ha);
  
 -      ha->isp_ops->reset_chip(vha);
 +      if (!critemp) {
 +              ha->flags.chip_reset_done = 0;
 +              clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 +              vha->qla_stats.total_isp_aborts++;
 +              ql_log(ql_log_info, vha, 0x013f,
 +                  "Performing ISP error recovery - ha = %p.\n", ha);
 +              ha->isp_ops->reset_chip(vha);
 +      }
  
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
        }
  
        if (!ha->flags.eeh_busy) {
 -              /* Requeue all commands in outstanding command list. */
 -              qla2x00_abort_all_cmds(vha, DID_RESET << 16);
 +              if (critemp) {
 +                      qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
 +              } else {
 +                      /* Requeue all commands in outstanding command list. */
 +                      qla2x00_abort_all_cmds(vha, DID_RESET << 16);
 +              }
        }
  
        qla2x00_free_irqs(vha);
 -      set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
 +      if (critemp)
 +              set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
 +      else
 +              set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
  
        /* Clear the Interrupts */
        QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
@@@ -1517,7 -1475,6 +1517,7 @@@ qlafx00_timer_routine(scsi_qla_host_t *
        uint32_t fw_heart_beat;
        uint32_t aenmbx0;
        struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
 +      uint32_t tempc;
  
        /* Check firmware health */
        if (ha->mr.fw_hbt_cnt)
                } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
                        ha->mr.fw_reset_timer_tick =
                            QLAFX00_MAX_RESET_INTERVAL;
 +              } else if (aenmbx0 == MBA_FW_RESET_FCT) {
 +                      ha->mr.fw_reset_timer_tick =
 +                          QLAFX00_MAX_RESET_INTERVAL;
                }
                ha->mr.old_aenmbx0_state = aenmbx0;
                ha->mr.fw_reset_timer_tick--;
        }
 +      if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
 +              /*
 +               * Critical temperature recovery to be
 +               * performed in timer routine
 +               */
 +              if (ha->mr.fw_critemp_timer_tick == 0) {
 +                      tempc = QLAFX00_GET_TEMPERATURE(ha);
 +                      ql_dbg(ql_dbg_timer, vha, 0x6012,
 +                          "ISPFx00(%s): Critical temp timer, "
 +                          "current SOC temperature: %d\n",
 +                          __func__, tempc);
 +                      if (tempc < ha->mr.critical_temperature) {
 +                              set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 +                              clear_bit(FX00_CRITEMP_RECOVERY,
 +                                  &vha->dpc_flags);
 +                              qla2xxx_wake_dpc(vha);
 +                      }
 +                      ha->mr.fw_critemp_timer_tick =
 +                          QLAFX00_CRITEMP_INTERVAL;
 +              } else {
 +                      ha->mr.fw_critemp_timer_tick--;
 +              }
 +      }
  }
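
The timer-routine hunk adds a second countdown next to the firmware-reset one: each tick decrements fw_critemp_timer_tick, and at zero the SOC temperature is sampled; once it falls back below the critical threshold, the routine sets ISP_ABORT_NEEDED, clears FX00_CRITEMP_RECOVERY, and wakes the DPC thread. The generic countdown-and-recheck shape as a runnable sketch (interval and sensor readings invented):

    #include <stdio.h>

    #define CRITEMP_INTERVAL 3        /* re-check every 3 ticks (assumed value) */

    static int read_temp(int tick)    /* fake SOC sensor that cools over time */
    {
        return 110 - 10 * tick;
    }

    int main(void)
    {
        int critical = 90;            /* assumed critical threshold */
        int timer_tick = 0;

        for (int tick = 0; tick < 10; tick++) {   /* one pass per timer callback */
            if (timer_tick == 0) {
                int tempc = read_temp(tick);
                printf("tick %d: SOC temp %d\n", tick, tempc);
                if (tempc < critical) {
                    printf("cooled down: schedule ISP abort, leave recovery\n");
                    break;
                }
                timer_tick = CRITEMP_INTERVAL;    /* re-arm the countdown */
            } else {
                timer_tick--;
            }
        }
        return 0;
    }
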
  
  /*
@@@ -1639,7 -1570,7 +1639,7 @@@ qlafx00_reset_initialize(scsi_qla_host_
  
        if (vha->flags.online) {
                scsi_block_requests(vha->host);
 -              qlafx00_abort_isp_cleanup(vha);
 +              qlafx00_abort_isp_cleanup(vha, false);
        }
  
        ql_log(ql_log_info, vha, 0x0143,
@@@ -1671,15 -1602,7 +1671,15 @@@ qlafx00_abort_isp(scsi_qla_host_t *vha
                }
  
                scsi_block_requests(vha->host);
 -              qlafx00_abort_isp_cleanup(vha);
 +              qlafx00_abort_isp_cleanup(vha, false);
 +      } else {
 +              scsi_block_requests(vha->host);
 +              clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 +              vha->qla_stats.total_isp_aborts++;
 +              ha->isp_ops->reset_chip(vha);
 +              set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
 +              /* Clear the Interrupts */
 +              QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
        }
  
        ql_log(ql_log_info, vha, 0x0145,
@@@ -1765,15 -1688,6 +1765,15 @@@ qlafx00_process_aen(struct scsi_qla_hos
                aen_code = FCH_EVT_LINKDOWN;
                aen_data = 0;
                break;
 +      case QLAFX00_MBA_TEMP_CRIT:     /* Critical temperature event */
 +              ql_log(ql_log_info, vha, 0x5082,
 +                  "Process critical temperature event "
 +                  "aenmb[0]: %x\n",
 +                  evt->u.aenfx.evtcode);
 +              scsi_block_requests(vha->host);
 +              qlafx00_abort_isp_cleanup(vha, true);
 +              scsi_unblock_requests(vha->host);
 +              break;
        }
  
        fc_host_post_event(vha->host, fc_get_event_number(),
@@@ -1865,7 -1779,7 +1865,7 @@@ qlafx00_fx_disc(scsi_qla_host_t *vha, f
                p_sysid = utsname();
                if (!p_sysid) {
                        ql_log(ql_log_warn, vha, 0x303c,
-                           "Not able to get the system informtion\n");
+                           "Not able to get the system information\n");
                        goto done_free_sp;
                }
                break;
                    sizeof(vha->hw->mr.uboot_version));
                memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
                    sizeof(vha->hw->mr.fru_serial_num));
 +              vha->hw->mr.critical_temperature =
 +                  (pinfo->nominal_temp_value) ?
 +                  pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
 +              ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
 +                  QLAFX00_EXTENDED_IO_EN_MASK) != 0;
        } else if (fx_type == FXDISC_GET_PORT_INFO) {
                struct port_info_data *pinfo =
                    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
@@@ -2112,7 -2021,6 +2112,7 @@@ qlafx00_initialize_adapter(scsi_qla_hos
  {
        int     rval;
        struct qla_hw_data *ha = vha->hw;
 +      uint32_t tempc;
  
        /* Clear adapter flags. */
        vha->flags.online = 0;
        vha->flags.reset_active = 0;
        ha->flags.pci_channel_io_perm_failure = 0;
        ha->flags.eeh_busy = 0;
 -      ha->thermal_support = 0;
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        vha->device_flags = DFLG_NO_CABLE;
        rval = qla2x00_init_rings(vha);
        ha->flags.chip_reset_done = 1;
  
 +      tempc = QLAFX00_GET_TEMPERATURE(ha);
 +      ql_dbg(ql_dbg_init, vha, 0x0152,
 +          "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
 +          __func__, tempc);
 +
        return rval;
  }
  
@@@ -2622,13 -2526,16 +2622,13 @@@ check_scsi_status
  
        if (logit)
                ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
 -                  "FCP command status: 0x%x-0x%x (0x%x) "
 -                  "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
 -                  "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
 -                  "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
 -                  "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
 +                  "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
 +                  "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
 +                  "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
 +                  "par_sense_len=0x%x, rsp_info_len=0x%x\n",
                    comp_status, scsi_status, res, vha->host_no,
                    cp->device->id, cp->device->lun, fcport->tgt_id,
 -                  lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
 -                  cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
 -                  cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
 +                  lscsi_status, cp->cmnd, scsi_bufflen(cp),
                    rsp_info_len, resid_len, fw_resid_len, sense_len,
                    par_sense_len, rsp_info_len);
  
@@@ -2813,6 -2720,9 +2813,6 @@@ qlafx00_process_response_queue(struct s
        struct sts_entry_fx00 *pkt;
        response_t *lptr;
  
 -      if (!vha->flags.online)
 -              return;
 -
        while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
            RESPONSE_PROCESSED) {
                lptr = rsp->ring_ptr;
@@@ -2914,28 -2824,6 +2914,28 @@@ qlafx00_async_event(scsi_qla_host_t *vh
                    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
                data_size = 4;
                break;
 +
 +      case QLAFX00_MBA_TEMP_OVER:     /* Over temperature event */
 +              ql_log(ql_log_info, vha, 0x5085,
 +                  "Asynchronous over temperature event received "
 +                  "aenmb[0]: %x\n",
 +                  ha->aenmb[0]);
 +              break;
 +
 +      case QLAFX00_MBA_TEMP_NORM:     /* Normal temperature event */
 +              ql_log(ql_log_info, vha, 0x5086,
 +                  "Asynchronous normal temperature event received "
 +                  "aenmb[0]: %x\n",
 +                  ha->aenmb[0]);
 +              break;
 +
 +      case QLAFX00_MBA_TEMP_CRIT:     /* Critical temperature event */
 +              ql_log(ql_log_info, vha, 0x5083,
 +                  "Asynchronous critical temperature event received "
 +                  "aenmb[0]: %x\n",
 +                  ha->aenmb[0]);
 +              break;
 +
        default:
                ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
                ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
index e3946e44e076e2aaf5bd0e7813554f65a9062745,1c2ba17760b8fecb5e0a23872e7a1069123d5c9d..8c11355dec233595a04a33877f832084af933168
@@@ -40,7 -40,7 +40,7 @@@
   * to glue code.  These bitbang setup() and cleanup() routines are always
   * used, though maybe they're called from controller-aware code.
   *
-  * chipselect() and friends may use use spi_device->controller_data and
+  * chipselect() and friends may use spi_device->controller_data and
   * controller registers as appropriate.
   *
   *
@@@ -255,140 -255,150 +255,140 @@@ static int spi_bitbang_bufs(struct spi_
   * Drivers can provide word-at-a-time i/o primitives, or provide
   * transfer-at-a-time ones to leverage dma or fifo hardware.
   */
 -static void bitbang_work(struct work_struct *work)
 +
 +static int spi_bitbang_prepare_hardware(struct spi_master *spi)
  {
 -      struct spi_bitbang      *bitbang =
 -              container_of(work, struct spi_bitbang, work);
 +      struct spi_bitbang      *bitbang;
        unsigned long           flags;
 -      struct spi_message      *m, *_m;
 +
 +      bitbang = spi_master_get_devdata(spi);
  
        spin_lock_irqsave(&bitbang->lock, flags);
        bitbang->busy = 1;
 -      list_for_each_entry_safe(m, _m, &bitbang->queue, queue) {
 -              struct spi_device       *spi;
 -              unsigned                nsecs;
 -              struct spi_transfer     *t = NULL;
 -              unsigned                tmp;
 -              unsigned                cs_change;
 -              int                     status;
 -              int                     do_setup = -1;
 -
 -              list_del(&m->queue);
 -              spin_unlock_irqrestore(&bitbang->lock, flags);
 -
 -              /* FIXME this is made-up ... the correct value is known to
 -               * word-at-a-time bitbang code, and presumably chipselect()
 -               * should enforce these requirements too?
 -               */
 -              nsecs = 100;
 +      spin_unlock_irqrestore(&bitbang->lock, flags);
  
 -              spi = m->spi;
 -              tmp = 0;
 -              cs_change = 1;
 -              status = 0;
 +      return 0;
 +}
  
 -              list_for_each_entry (t, &m->transfers, transfer_list) {
 -
 -                      /* override speed or wordsize? */
 -                      if (t->speed_hz || t->bits_per_word)
 -                              do_setup = 1;
 -
 -                      /* init (-1) or override (1) transfer params */
 -                      if (do_setup != 0) {
 -                              status = bitbang->setup_transfer(spi, t);
 -                              if (status < 0)
 -                                      break;
 -                              if (do_setup == -1)
 -                                      do_setup = 0;
 -                      }
 -
 -                      /* set up default clock polarity, and activate chip;
 -                       * this implicitly updates clock and spi modes as
 -                       * previously recorded for this device via setup().
 -                       * (and also deselects any other chip that might be
 -                       * selected ...)
 -                       */
 -                      if (cs_change) {
 -                              bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
 -                              ndelay(nsecs);
 -                      }
 -                      cs_change = t->cs_change;
 -                      if (!t->tx_buf && !t->rx_buf && t->len) {
 -                              status = -EINVAL;
 -                              break;
 -                      }
 +static int spi_bitbang_transfer_one(struct spi_master *master,
 +                                  struct spi_message *m)
 +{
 +      struct spi_bitbang      *bitbang;
 +      unsigned                nsecs;
 +      struct spi_transfer     *t = NULL;
 +      unsigned                cs_change;
 +      int                     status;
 +      int                     do_setup = -1;
 +      struct spi_device       *spi = m->spi;
 +
 +      bitbang = spi_master_get_devdata(master);
 +
 +      /* FIXME this is made-up ... the correct value is known to
 +       * word-at-a-time bitbang code, and presumably chipselect()
 +       * should enforce these requirements too?
 +       */
 +      nsecs = 100;
  
 -                      /* transfer data.  the lower level code handles any
 -                       * new dma mappings it needs. our caller always gave
 -                       * us dma-safe buffers.
 -                       */
 -                      if (t->len) {
 -                              /* REVISIT dma API still needs a designated
 -                               * DMA_ADDR_INVALID; ~0 might be better.
 -                               */
 -                              if (!m->is_dma_mapped)
 -                                      t->rx_dma = t->tx_dma = 0;
 -                              status = bitbang->txrx_bufs(spi, t);
 -                      }
 -                      if (status > 0)
 -                              m->actual_length += status;
 -                      if (status != t->len) {
 -                              /* always report some kind of error */
 -                              if (status >= 0)
 -                                      status = -EREMOTEIO;
 +      cs_change = 1;
 +      status = 0;
 +
 +      list_for_each_entry (t, &m->transfers, transfer_list) {
 +
 +              /* override speed or wordsize? */
 +              if (t->speed_hz || t->bits_per_word)
 +                      do_setup = 1;
 +
 +              /* init (-1) or override (1) transfer params */
 +              if (do_setup != 0) {
 +                      status = bitbang->setup_transfer(spi, t);
 +                      if (status < 0)
                                break;
 -                      }
 -                      status = 0;
 -
 -                      /* protocol tweaks before next transfer */
 -                      if (t->delay_usecs)
 -                              udelay(t->delay_usecs);
 -
 -                      if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
 -                              /* sometimes a short mid-message deselect of the chip
 -                               * may be needed to terminate a mode or command
 -                               */
 -                              ndelay(nsecs);
 -                              bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
 -                              ndelay(nsecs);
 -                      }
 +                      if (do_setup == -1)
 +                              do_setup = 0;
                }
  
 -              m->status = status;
 -              m->complete(m->context);
 +              /* set up default clock polarity, and activate chip;
 +               * this implicitly updates clock and spi modes as
 +               * previously recorded for this device via setup().
 +               * (and also deselects any other chip that might be
 +               * selected ...)
 +               */
 +              if (cs_change) {
 +                      bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
 +                      ndelay(nsecs);
 +              }
 +              cs_change = t->cs_change;
 +              if (!t->tx_buf && !t->rx_buf && t->len) {
 +                      status = -EINVAL;
 +                      break;
 +              }
  
 -              /* normally deactivate chipselect ... unless no error and
 -               * cs_change has hinted that the next message will probably
 -               * be for this chip too.
 +              /* transfer data.  the lower level code handles any
 +               * new dma mappings it needs. our caller always gave
 +               * us dma-safe buffers.
                 */
 -              if (!(status == 0 && cs_change)) {
 +              if (t->len) {
 +                      /* REVISIT dma API still needs a designated
 +                       * DMA_ADDR_INVALID; ~0 might be better.
 +                       */
 +                      if (!m->is_dma_mapped)
 +                              t->rx_dma = t->tx_dma = 0;
 +                      status = bitbang->txrx_bufs(spi, t);
 +              }
 +              if (status > 0)
 +                      m->actual_length += status;
 +              if (status != t->len) {
 +                      /* always report some kind of error */
 +                      if (status >= 0)
 +                              status = -EREMOTEIO;
 +                      break;
 +              }
 +              status = 0;
 +
 +              /* protocol tweaks before next transfer */
 +              if (t->delay_usecs)
 +                      udelay(t->delay_usecs);
 +
 +              if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
 +                      /* sometimes a short mid-message deselect of the chip
 +                       * may be needed to terminate a mode or command
 +                       */
                        ndelay(nsecs);
                        bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
                        ndelay(nsecs);
                }
 +      }
 +
 +      m->status = status;
  
 -              spin_lock_irqsave(&bitbang->lock, flags);
 +      /* normally deactivate chipselect ... unless no error and
 +       * cs_change has hinted that the next message will probably
 +       * be for this chip too.
 +       */
 +      if (!(status == 0 && cs_change)) {
 +              ndelay(nsecs);
 +              bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
 +              ndelay(nsecs);
        }
 -      bitbang->busy = 0;
 -      spin_unlock_irqrestore(&bitbang->lock, flags);
 +
 +      spi_finalize_current_message(master);
 +
 +      return status;
  }
  
 -/**
 - * spi_bitbang_transfer - default submit to transfer queue
 - */
 -int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
 +static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
  {
 -      struct spi_bitbang      *bitbang;
 +      struct spi_bitbang      *bitbang;
        unsigned long           flags;
 -      int                     status = 0;
  
 -      m->actual_length = 0;
 -      m->status = -EINPROGRESS;
 -
 -      bitbang = spi_master_get_devdata(spi->master);
 +      bitbang = spi_master_get_devdata(spi);
  
        spin_lock_irqsave(&bitbang->lock, flags);
 -      if (!spi->max_speed_hz)
 -              status = -ENETDOWN;
 -      else {
 -              list_add_tail(&m->queue, &bitbang->queue);
 -              queue_work(bitbang->workqueue, &bitbang->work);
 -      }
 +      bitbang->busy = 0;
        spin_unlock_irqrestore(&bitbang->lock, flags);
  
 -      return status;
 +      return 0;
  }
 -EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
  
  /*----------------------------------------------------------------------*/
  
  int spi_bitbang_start(struct spi_bitbang *bitbang)
  {
        struct spi_master *master = bitbang->master;
 -      int status;
  
        if (!master || !bitbang->chipselect)
                return -EINVAL;
  
 -      INIT_WORK(&bitbang->work, bitbang_work);
        spin_lock_init(&bitbang->lock);
 -      INIT_LIST_HEAD(&bitbang->queue);
  
        if (!master->mode_bits)
                master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
  
 -      if (!master->transfer)
 -              master->transfer = spi_bitbang_transfer;
 +      if (master->transfer || master->transfer_one_message)
 +              return -EINVAL;
 +
 +      master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
 +      master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
 +      master->transfer_one_message = spi_bitbang_transfer_one;
 +
        if (!bitbang->txrx_bufs) {
                bitbang->use_dma = 0;
                bitbang->txrx_bufs = spi_bitbang_bufs;
                        master->setup = spi_bitbang_setup;
                        master->cleanup = spi_bitbang_cleanup;
                }
 -      } else if (!master->setup)
 -              return -EINVAL;
 -      if (master->transfer == spi_bitbang_transfer &&
 -                      !bitbang->setup_transfer)
 -              return -EINVAL;
 -
 -      /* this task is the only thing to touch the SPI bits */
 -      bitbang->busy = 0;
 -      bitbang->workqueue = create_singlethread_workqueue(
 -                      dev_name(master->dev.parent));
 -      if (bitbang->workqueue == NULL) {
 -              status = -EBUSY;
 -              goto err1;
        }
  
        /* driver may get busy before register() returns, especially
         * if someone registered boardinfo for devices
         */
 -      status = spi_register_master(master);
 -      if (status < 0)
 -              goto err2;
 -
 -      return status;
 -
 -err2:
 -      destroy_workqueue(bitbang->workqueue);
 -err1:
 -      return status;
 +      return spi_register_master(master);
  }
  EXPORT_SYMBOL_GPL(spi_bitbang_start);
  
@@@ -460,6 -490,10 +460,6 @@@ int spi_bitbang_stop(struct spi_bitban
  {
        spi_unregister_master(bitbang->master);
  
 -      WARN_ON(!list_empty(&bitbang->queue));
 -
 -      destroy_workqueue(bitbang->workqueue);
 -
        return 0;
  }
  EXPORT_SYMBOL_GPL(spi_bitbang_stop);
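
The spi-bitbang rewrite above retires the driver-private workqueue (bitbang_work(), the message list, and the create/destroy_singlethread_workqueue() lifetime code) in favor of the SPI core's message queue: the driver now supplies prepare_transfer_hardware / transfer_one_message / unprepare_transfer_hardware, and the new check rejecting a master that already sets transfer or transfer_one_message keeps the legacy and queued paths from being mixed. A hedged sketch of the minimal contract such a handler must honor, inferred from the hunk rather than quoted from SPI core documentation (do_one_transfer() is a hypothetical helper):

    static int example_transfer_one(struct spi_master *master,
                                    struct spi_message *m)
    {
        struct spi_transfer *t;
        int status = 0;

        list_for_each_entry(t, &m->transfers, transfer_list) {
            status = do_one_transfer(master, t);   /* hypothetical per-transfer work */
            if (status)
                break;
            m->actual_length += t->len;
        }

        m->status = status;
        spi_finalize_current_message(master);      /* must be called exactly once */
        return status;
    }
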
diff --combined fs/ext4/super.c
index 049c8a8bdc0eab2e7dbdfdcf67a45fd43b31a95b,ffdfe385b029dd3f291ec10b9766f113324e4cdd..2c2e6cbc6bedc549938262e50be8e118d1cb6de7
@@@ -162,7 -162,7 +162,7 @@@ void *ext4_kvmalloc(size_t size, gfp_t 
  {
        void *ret;
  
-       ret = kmalloc(size, flags);
+       ret = kmalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags, PAGE_KERNEL);
        return ret;
@@@ -172,7 -172,7 +172,7 @@@ void *ext4_kvzalloc(size_t size, gfp_t 
  {
        void *ret;
  
-       ret = kzalloc(size, flags);
+       ret = kzalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
        return ret;
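
Both ext4 helpers above try a physically contiguous kmalloc() first and fall back to __vmalloc(); adding __GFP_NOWARN suppresses the page allocator's failure warning for the first attempt, which is pure noise when a fallback exists (this is the treewide "__GFP_NOWARN to k.alloc calls with v.alloc fallbacks" pattern from this merge). The companion rule, sketched generically: memory from such a helper must be freed by the matching allocator, which is_vmalloc_addr() distinguishes (ext4 keeps its own ext4_kvfree() for this; the sketch is illustrative, not a quote):

    static void kv_free_sketch(void *ptr)
    {
        if (is_vmalloc_addr(ptr))
            vfree(ptr);     /* came from the __vmalloc() fallback */
        else
            kfree(ptr);     /* came from kmalloc() */
    }
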
@@@ -762,7 -762,9 +762,7 @@@ static void ext4_put_super(struct super
        ext4_unregister_li_request(sb);
        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
  
 -      flush_workqueue(sbi->unrsv_conversion_wq);
        flush_workqueue(sbi->rsv_conversion_wq);
 -      destroy_workqueue(sbi->unrsv_conversion_wq);
        destroy_workqueue(sbi->rsv_conversion_wq);
  
        if (sbi->s_journal) {
@@@ -873,12 -875,14 +873,12 @@@ static struct inode *ext4_alloc_inode(s
  #endif
        ei->jinode = NULL;
        INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
 -      INIT_LIST_HEAD(&ei->i_unrsv_conversion_list);
        spin_lock_init(&ei->i_completed_io_lock);
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_ioend_count, 0);
        atomic_set(&ei->i_unwritten, 0);
        INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
 -      INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work);
  
        return &ei->vfs_inode;
  }
@@@ -1130,8 -1134,8 +1130,8 @@@ enum 
        Opt_nouid32, Opt_debug, Opt_removed,
        Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
        Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
 -      Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
 -      Opt_journal_dev, Opt_journal_checksum, Opt_journal_async_commit,
 +      Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
 +      Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
        Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
        Opt_data_err_abort, Opt_data_err_ignore,
        Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@@ -1175,7 -1179,6 +1175,7 @@@ static const match_table_t tokens = 
        {Opt_min_batch_time, "min_batch_time=%u"},
        {Opt_max_batch_time, "max_batch_time=%u"},
        {Opt_journal_dev, "journal_dev=%u"},
 +      {Opt_journal_path, "journal_path=%s"},
        {Opt_journal_checksum, "journal_checksum"},
        {Opt_journal_async_commit, "journal_async_commit"},
        {Opt_abort, "abort"},
@@@ -1335,7 -1338,6 +1335,7 @@@ static int clear_qf_name(struct super_b
  #define MOPT_NO_EXT2  0x0100
  #define MOPT_NO_EXT3  0x0200
  #define MOPT_EXT4_ONLY        (MOPT_NO_EXT2 | MOPT_NO_EXT3)
 +#define MOPT_STRING   0x0400
  
  static const struct mount_opts {
        int     token;
        {Opt_delalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
 -       MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
 +       MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_SET},
        {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
        {Opt_resuid, 0, MOPT_GTE0},
        {Opt_resgid, 0, MOPT_GTE0},
        {Opt_journal_dev, 0, MOPT_GTE0},
 +      {Opt_journal_path, 0, MOPT_STRING},
        {Opt_journal_ioprio, 0, MOPT_GTE0},
        {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
@@@ -1479,7 -1480,7 +1479,7 @@@ static int handle_mount_opt(struct supe
                return -1;
        }
  
 -      if (args->from && match_int(args, &arg))
 +      if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
                return -1;
        if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
                return -1;
                        return -1;
                }
                *journal_devnum = arg;
 +      } else if (token == Opt_journal_path) {
 +              char *journal_path;
 +              struct inode *journal_inode;
 +              struct path path;
 +              int error;
 +
 +              if (is_remount) {
 +                      ext4_msg(sb, KERN_ERR,
 +                               "Cannot specify journal on remount");
 +                      return -1;
 +              }
 +              journal_path = match_strdup(&args[0]);
 +              if (!journal_path) {
 +                      ext4_msg(sb, KERN_ERR, "error: could not dup "
 +                              "journal device string");
 +                      return -1;
 +              }
 +
 +              error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
 +              if (error) {
 +                      ext4_msg(sb, KERN_ERR, "error: could not find "
 +                              "journal device path: error %d", error);
 +                      kfree(journal_path);
 +                      return -1;
 +              }
 +
 +              journal_inode = path.dentry->d_inode;
 +              if (!S_ISBLK(journal_inode->i_mode)) {
 +                      ext4_msg(sb, KERN_ERR, "error: journal path %s "
 +                              "is not a block device", journal_path);
 +                      path_put(&path);
 +                      kfree(journal_path);
 +                      return -1;
 +              }
 +
 +              *journal_devnum = new_encode_dev(journal_inode->i_rdev);
 +              path_put(&path);
 +              kfree(journal_path);
        } else if (token == Opt_journal_ioprio) {
                if (arg > 7) {
                        ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
@@@ -3520,7 -3483,7 +3520,7 @@@ static int ext4_fill_super(struct super
                }
                if (test_opt(sb, DIOREAD_NOLOCK)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
 -                               "both data=journal and delalloc");
 +                               "both data=journal and dioread_nolock");
                        goto failed_mount;
                }
                if (test_opt(sb, DELALLOC))
@@@ -3991,6 -3954,14 +3991,6 @@@ no_journal
                goto failed_mount4;
        }
  
 -      EXT4_SB(sb)->unrsv_conversion_wq =
 -              alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
 -      if (!EXT4_SB(sb)->unrsv_conversion_wq) {
 -              printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
 -              ret = -ENOMEM;
 -              goto failed_mount4;
 -      }
 -
        /*
         * The jbd2_journal_load will have done any necessary log recovery,
         * so we can safely mount the rest of the filesystem now.
@@@ -4144,6 -4115,8 +4144,6 @@@ failed_mount4
        ext4_msg(sb, KERN_ERR, "mount failed");
        if (EXT4_SB(sb)->rsv_conversion_wq)
                destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
 -      if (EXT4_SB(sb)->unrsv_conversion_wq)
 -              destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
  failed_mount_wq:
        if (sbi->s_journal) {
                jbd2_journal_destroy(sbi->s_journal);
@@@ -4591,6 -4564,7 +4591,6 @@@ static int ext4_sync_fs(struct super_bl
  
        trace_ext4_sync_fs(sb, wait);
        flush_workqueue(sbi->rsv_conversion_wq);
 -      flush_workqueue(sbi->unrsv_conversion_wq);
        /*
         * Writeback quota in non-journalled quota case - journalled quota has
         * no dirty dquots
@@@ -4626,6 -4600,7 +4626,6 @@@ static int ext4_sync_fs_nojournal(struc
  
        trace_ext4_sync_fs(sb, wait);
        flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
 -      flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
        dquot_writeback_dquots(sb, -1);
        if (wait && test_opt(sb, BARRIER))
                ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
@@@ -4752,21 -4727,6 +4752,21 @@@ static int ext4_remount(struct super_bl
                goto restore_opts;
        }
  
 +      if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 +              if (test_opt2(sb, EXPLICIT_DELALLOC)) {
 +                      ext4_msg(sb, KERN_ERR, "can't mount with "
 +                               "both data=journal and delalloc");
 +                      err = -EINVAL;
 +                      goto restore_opts;
 +              }
 +              if (test_opt(sb, DIOREAD_NOLOCK)) {
 +                      ext4_msg(sb, KERN_ERR, "can't mount with "
 +                               "both data=journal and dioread_nolock");
 +                      err = -EINVAL;
 +                      goto restore_opts;
 +              }
 +      }
 +
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");
  
@@@ -5521,7 -5481,6 +5521,7 @@@ static void __exit ext4_exit_fs(void
        kset_unregister(ext4_kset);
        ext4_exit_system_zone();
        ext4_exit_pageio();
 +      ext4_exit_es();
  }
  
  MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --combined kernel/workqueue.c
index 29b79852a845cd37d00a4929a92154f572a62a4e,7f01a3eeaf9520e934dfe70f502cd2a3feee8b93..987293d03ebcf0e6bf1c6b81e8a4e68c7965e903
   *
   * This is the generic async execution mechanism.  Work items are
   * executed in process context.  The worker pool is shared and
 - * automatically managed.  There is one worker pool for each CPU and
 - * one extra for works which are better served by workers which are
 - * not bound to any specific CPU.
 + * automatically managed.  There are two worker pools for each CPU (one for
 + * normal work items and the other for high priority ones) and some extra
 + * pools for workqueues which are not bound to any specific CPU - the
 + * number of these backing pools is dynamic.
   *
   * Please read Documentation/workqueue.txt for details.
   */
@@@ -541,6 -540,8 +541,8 @@@ static int worker_pool_assign_id(struc
   * This must be called either with pwq_lock held or sched RCU read locked.
   * If the pwq needs to be used beyond the locking in effect, the caller is
   * responsible for guaranteeing that the pwq stays online.
+  *
+  * Return: The unbound pool_workqueue for @node.
   */
  static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
@@@ -639,8 -640,6 +641,6 @@@ static struct pool_workqueue *get_work_
   * get_work_pool - return the worker_pool a given work was associated with
   * @work: the work item of interest
   *
-  * Return the worker_pool @work was last associated with.  %NULL if none.
-  *
   * Pools are created and destroyed under wq_pool_mutex, which allows read
   * access under sched-RCU read lock.  As such, this function should be
   * called under wq_pool_mutex or with preemption disabled.
   * mentioned locking is in effect.  If the returned pool needs to be used
   * beyond the critical section, the caller is responsible for ensuring the
   * returned pool is and stays online.
+  *
+  * Return: The worker_pool @work was last associated with.  %NULL if none.
   */
  static struct worker_pool *get_work_pool(struct work_struct *work)
  {
   * get_work_pool_id - return the worker pool ID a given work is associated with
   * @work: the work item of interest
   *
-  * Return the worker_pool ID @work was last associated with.
+  * Return: The worker_pool ID @work was last associated with.
   * %WORK_OFFQ_POOL_NONE if none.
   */
  static int get_work_pool_id(struct work_struct *work)
@@@ -831,7 -832,7 +833,7 @@@ void wq_worker_waking_up(struct task_st
   * CONTEXT:
   * spin_lock_irq(rq->lock)
   *
-  * RETURNS:
+  * Return:
   * Worker task on @cpu to wake up, %NULL if none.
   */
  struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@@ -966,8 -967,8 +968,8 @@@ static inline void worker_clr_flags(str
   * CONTEXT:
   * spin_lock_irq(pool->lock).
   *
-  * RETURNS:
-  * Pointer to worker which is executing @work if found, NULL
+  * Return:
+  * Pointer to worker which is executing @work if found, %NULL
   * otherwise.
   */
  static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@@ -1155,14 -1156,16 +1157,16 @@@ out_put
   * @flags: place to store irq state
   *
   * Try to grab PENDING bit of @work.  This function can handle @work in any
-  * stable state - idle, on timer or on worklist.  Return values are
+  * stable state - idle, on timer or on worklist.
   *
+  * Return:
   *  1         if @work was pending and we successfully stole PENDING
   *  0         if @work was idle and we claimed PENDING
   *  -EAGAIN   if PENDING couldn't be grabbed at the moment, safe to busy-retry
   *  -ENOENT   if someone else is canceling @work, this state may persist
   *            for arbitrarily long
   *
+  * Note:
   * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
   * interrupted while holding PENDING and @work off queue, irq must be
   * disabled on entry.  This, combined with delayed_work->timer being
@@@ -1404,10 -1407,10 +1408,10 @@@ retry
   * @wq: workqueue to use
   * @work: work to queue
   *
-  * Returns %false if @work was already on a queue, %true otherwise.
-  *
   * We queue the work to a specific CPU, the caller must ensure it
   * can't go away.
+  *
+  * Return: %false if @work was already on a queue, %true otherwise.
   */
  bool queue_work_on(int cpu, struct workqueue_struct *wq,
                   struct work_struct *work)
@@@ -1477,7 -1480,7 +1481,7 @@@ static void __queue_delayed_work(int cp
   * @dwork: work to queue
   * @delay: number of jiffies to wait before queueing
   *
-  * Returns %false if @work was already on a queue, %true otherwise.  If
+  * Return: %false if @work was already on a queue, %true otherwise.  If
   * @delay is zero and @dwork is idle, it will be scheduled for immediate
   * execution.
   */
@@@ -1513,7 -1516,7 +1517,7 @@@ EXPORT_SYMBOL(queue_delayed_work_on)
   * zero, @work is guaranteed to be scheduled immediately regardless of its
   * current state.
   *
-  * Returns %false if @dwork was idle and queued, %true if @dwork was
+  * Return: %false if @dwork was idle and queued, %true if @dwork was
   * pending and its timer was modified.
   *
   * This function is safe to call from any context including IRQ handler.
@@@ -1628,7 -1631,7 +1632,7 @@@ static void worker_leave_idle(struct wo
   * Might sleep.  Called without any lock but returns with pool->lock
   * held.
   *
-  * RETURNS:
+  * Return:
   * %true if the associated pool is online (@worker is successfully
   * bound), %false if offline.
   */
@@@ -1689,7 -1692,7 +1693,7 @@@ static struct worker *alloc_worker(void
   * CONTEXT:
   * Might sleep.  Does GFP_KERNEL allocations.
   *
-  * RETURNS:
+  * Return:
   * Pointer to the newly created worker.
   */
  static struct worker *create_worker(struct worker_pool *pool)
@@@ -1789,6 -1792,8 +1793,8 @@@ static void start_worker(struct worker 
   * @pool: the target pool
   *
   * Grab the managership of @pool and create and start a new worker for it.
+  *
+  * Return: 0 on success. A negative error code otherwise.
   */
  static int create_and_start_worker(struct worker_pool *pool)
  {
@@@ -1933,7 -1938,7 +1939,7 @@@ static void pool_mayday_timeout(unsigne
   * multiple times.  Does GFP_KERNEL allocations.  Called only from
   * manager.
   *
-  * RETURNS:
+  * Return:
   * %false if no action was taken and pool->lock stayed locked, %true
   * otherwise.
   */
@@@ -1990,7 -1995,7 +1996,7 @@@ restart
   * spin_lock_irq(pool->lock) which may be released and regrabbed
   * multiple times.  Called only from manager.
   *
-  * RETURNS:
+  * Return:
   * %false if no action was taken and pool->lock stayed locked, %true
   * otherwise.
   */
@@@ -2033,12 -2038,9 +2039,12 @@@ static bool maybe_destroy_workers(struc
   * spin_lock_irq(pool->lock) which may be released and regrabbed
   * multiple times.  Does GFP_KERNEL allocations.
   *
-  * RETURNS:
+  * Return:
 - * spin_lock_irq(pool->lock) which may be released and regrabbed
 - * multiple times.  Does GFP_KERNEL allocations.
 + * %false if the pool doesn't need management and the caller can safely start
 + * processing works, %true indicates that the function released pool->lock
 + * and reacquired it to perform some management function and that the
 + * conditions that the caller verified while holding the lock before
 + * calling the function might no longer be true.
   */
  static bool manage_workers(struct worker *worker)
  {
@@@ -2205,15 -2207,6 +2211,15 @@@ __acquires(&pool->lock
                dump_stack();
        }
  
 +      /*
 +       * The following prevents a kworker from hogging CPU on !PREEMPT
 +       * kernels, where a requeueing work item waiting for something to
 +       * happen could deadlock with stop_machine as such work item could
 +       * indefinitely requeue itself while all other CPUs are trapped in
 +       * stop_machine.
 +       */
 +      cond_resched();
 +
        spin_lock_irq(&pool->lock);
  
        /* clear cpu intensive status */
@@@ -2259,6 -2252,8 +2265,8 @@@ static void process_scheduled_works(str
   * work items regardless of their specific target workqueue.  The only
   * exception is work items which belong to workqueues with a rescuer which
   * will be explained in rescuer_thread().
+  *
+  * Return: 0
   */
  static int worker_thread(void *__worker)
  {
@@@ -2357,6 -2352,8 +2365,8 @@@ sleep
   * those works so that forward progress can be guaranteed.
   *
   * This should happen rarely.
+  *
+  * Return: 0
   */
  static int rescuer_thread(void *__rescuer)
  {
@@@ -2529,7 -2526,7 +2539,7 @@@ static void insert_wq_barrier(struct po
   * CONTEXT:
   * mutex_lock(wq->mutex).
   *
-  * RETURNS:
+  * Return:
   * %true if @flush_color >= 0 and there's something to flush.  %false
   * otherwise.
   */
@@@ -2830,19 -2827,6 +2840,19 @@@ already_gone
        return false;
  }
  
 +static bool __flush_work(struct work_struct *work)
 +{
 +      struct wq_barrier barr;
 +
 +      if (start_flush_work(work, &barr)) {
 +              wait_for_completion(&barr.done);
 +              destroy_work_on_stack(&barr.work);
 +              return true;
 +      } else {
 +              return false;
 +      }
 +}
 +
  /**
   * flush_work - wait for a work to finish executing the last queueing instance
   * @work: the work to flush
   * Wait until @work has finished execution.  @work is guaranteed to be idle
   * on return if it hasn't been requeued since flush started.
   *
-  * RETURNS:
+  * Return:
   * %true if flush_work() waited for the work to finish execution,
   * %false if it was already idle.
   */
  bool flush_work(struct work_struct *work)
  {
 -      struct wq_barrier barr;
 -
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
  
 -      if (start_flush_work(work, &barr)) {
 -              wait_for_completion(&barr.done);
 -              destroy_work_on_stack(&barr.work);
 -              return true;
 -      } else {
 -              return false;
 -      }
 +      return __flush_work(work);
  }
  EXPORT_SYMBOL_GPL(flush_work);
  
@@@ -2902,7 -2894,7 +2912,7 @@@ static bool __cancel_work_timer(struct 
   * The caller must ensure that the workqueue on which @work was last
   * queued can't be destroyed before this function returns.
   *
-  * RETURNS:
+  * Return:
   * %true if @work was pending, %false otherwise.
   */
  bool cancel_work_sync(struct work_struct *work)
@@@ -2919,7 -2911,7 +2929,7 @@@ EXPORT_SYMBOL_GPL(cancel_work_sync)
   * immediate execution.  Like flush_work(), this function only
   * considers the last queueing instance of @dwork.
   *
-  * RETURNS:
+  * Return:
   * %true if flush_work() waited for the work to finish execution,
   * %false if it was already idle.
   */
@@@ -2937,11 -2929,15 +2947,15 @@@ EXPORT_SYMBOL(flush_delayed_work)
   * cancel_delayed_work - cancel a delayed work
   * @dwork: delayed_work to cancel
   *
-  * Kill off a pending delayed_work.  Returns %true if @dwork was pending
-  * and canceled; %false if wasn't pending.  Note that the work callback
-  * function may still be running on return, unless it returns %true and the
-  * work doesn't re-arm itself.  Explicitly flush or use
-  * cancel_delayed_work_sync() to wait on it.
+  * Kill off a pending delayed_work.
+  *
+  * Return: %true if @dwork was pending and canceled; %false if it wasn't
+  * pending.
+  *
+  * Note:
+  * The work callback function may still be running on return, unless
+  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
+  * use cancel_delayed_work_sync() to wait on it.
   *
   * This function is safe to call from any context including IRQ handler.
   */
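
In practice the distinction looks like this (sketch, reusing the hypothetical @my_dwork from above):

        if (cancel_delayed_work(&my_dwork))
                pr_debug("timer killed, callback may still be running\n");

        /* when the callback must be finished too, block with the sync variant */
        cancel_delayed_work_sync(&my_dwork);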
@@@ -2970,7 -2966,7 +2984,7 @@@ EXPORT_SYMBOL(cancel_delayed_work)
   *
   * This is cancel_work_sync() for delayed works.
   *
-  * RETURNS:
+  * Return:
   * %true if @dwork was pending, %false otherwise.
   */
  bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@@ -2987,7 -2983,7 +3001,7 @@@ EXPORT_SYMBOL(cancel_delayed_work_sync)
   * system workqueue and blocks until all CPUs have completed.
   * schedule_on_each_cpu() is very slow.
   *
-  * RETURNS:
+  * Return:
   * 0 on success, -errno on failure.
   */
  int schedule_on_each_cpu(work_func_t func)
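
A hedged sketch of a caller (the function name is illustrative):

        static void count_something(struct work_struct *dummy)
        {
                pr_info("cpu %d checked in\n", smp_processor_id());
        }

        /* in process context: runs on every online CPU, blocks until done */
        ret = schedule_on_each_cpu(count_something);
        if (ret)
                pr_err("schedule_on_each_cpu failed: %d\n", ret);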
@@@ -3055,7 -3051,7 +3069,7 @@@ EXPORT_SYMBOL(flush_scheduled_work)
   * Executes the function immediately if process context is available,
   * otherwise schedules the function for delayed execution.
   *
-  * Returns:   0 - function was executed
+  * Return:   0 - function was executed
   *            1 - function was scheduled for execution
   */
  int execute_in_process_context(work_func_t fn, struct execute_work *ew)
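
A sketch of the calling convention; the execute_work storage must outlive a deferred run, and the names here are assumptions:

        static struct execute_work my_ew;

        static void my_cleanup(struct work_struct *work)
        {
                /* always reached in process context */
        }

        ret = execute_in_process_context(my_cleanup, &my_ew);
        /* ret == 0: ran inline; ret == 1: deferred to a workqueue */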
@@@ -3099,26 -3095,25 +3113,26 @@@ static struct workqueue_struct *dev_to_
        return wq_dev->wq;
  }
  
 -static ssize_t wq_per_cpu_show(struct device *dev,
 -                             struct device_attribute *attr, char *buf)
 +static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
 +                          char *buf)
  {
        struct workqueue_struct *wq = dev_to_wq(dev);
  
        return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
  }
 +static DEVICE_ATTR_RO(per_cpu);
  
 -static ssize_t wq_max_active_show(struct device *dev,
 -                                struct device_attribute *attr, char *buf)
 +static ssize_t max_active_show(struct device *dev,
 +                             struct device_attribute *attr, char *buf)
  {
        struct workqueue_struct *wq = dev_to_wq(dev);
  
        return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
  }
  
 -static ssize_t wq_max_active_store(struct device *dev,
 -                                 struct device_attribute *attr,
 -                                 const char *buf, size_t count)
 +static ssize_t max_active_store(struct device *dev,
 +                              struct device_attribute *attr, const char *buf,
 +                              size_t count)
  {
        struct workqueue_struct *wq = dev_to_wq(dev);
        int val;
        workqueue_set_max_active(wq, val);
        return count;
  }
 +static DEVICE_ATTR_RW(max_active);
  
 -static struct device_attribute wq_sysfs_attrs[] = {
 -      __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
 -      __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
 -      __ATTR_NULL,
 +static struct attribute *wq_sysfs_attrs[] = {
 +      &dev_attr_per_cpu.attr,
 +      &dev_attr_max_active.attr,
 +      NULL,
  };
 +ATTRIBUTE_GROUPS(wq_sysfs);
  
  static ssize_t wq_pool_ids_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@@ -3286,7 -3279,7 +3300,7 @@@ static struct device_attribute wq_sysfs
  
  static struct bus_type wq_subsys = {
        .name                           = "workqueue",
 -      .dev_attrs                      = wq_sysfs_attrs,
 +      .dev_groups                     = wq_sysfs_groups,
  };
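
The .dev_groups conversion works because ATTRIBUTE_GROUPS(wq_sysfs) above expands to roughly the following (a sketch of the generic sysfs helper, not code from this patch):

        static const struct attribute_group wq_sysfs_group = {
                .attrs = wq_sysfs_attrs,
        };
        static const struct attribute_group *wq_sysfs_groups[] = {
                &wq_sysfs_group,
                NULL,
        };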
  
  static int __init wq_sysfs_init(void)
@@@ -3315,7 -3308,7 +3329,7 @@@ static void wq_device_release(struct de
   * apply_workqueue_attrs() may race against userland updating the
   * attributes.
   *
-  * Returns 0 on success, -errno on failure.
+  * Return: 0 on success, -errno on failure.
   */
  int workqueue_sysfs_register(struct workqueue_struct *wq)
  {
@@@ -3408,7 -3401,9 +3422,9 @@@ void free_workqueue_attrs(struct workqu
   * @gfp_mask: allocation mask to use
   *
   * Allocate a new workqueue_attrs, initialize with default settings and
-  * return it.  Returns NULL on failure.
+  * return it.
+  *
+  * Return: The allocated new workqueue_attrs on success. %NULL on failure.
   */
  struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
  {
@@@ -3432,12 -3427,6 +3448,12 @@@ static void copy_workqueue_attrs(struc
  {
        to->nice = from->nice;
        cpumask_copy(to->cpumask, from->cpumask);
 +      /*
 +       * Unlike hash and equality test, this function doesn't ignore
 +       * ->no_numa as it is used for both pool and wq attrs.  Instead,
 +       * get_unbound_pool() explicitly clears ->no_numa after copying.
 +       */
 +      to->no_numa = from->no_numa;
  }
  
  /* hash value of the content of @attr */
@@@ -3467,7 -3456,8 +3483,8 @@@ static bool wqattrs_equal(const struct 
   * @pool: worker_pool to initialize
   *
   * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
-  * Returns 0 on success, -errno on failure.  Even on failure, all fields
+  *
+  * Return: 0 on success, -errno on failure.  Even on failure, all fields
   * inside @pool proper are initialized and put_unbound_pool() can be called
   * on @pool safely to release it.
   */
@@@ -3574,9 -3564,12 +3591,12 @@@ static void put_unbound_pool(struct wor
   * Obtain a worker_pool which has the same attributes as @attrs, bump the
   * reference count and return it.  If there already is a matching
   * worker_pool, it will be used; otherwise, this function attempts to
-  * create a new one.  On failure, returns NULL.
+  * create a new one.
   *
   * Should be called with wq_pool_mutex held.
+  *
+  * Return: On success, a worker_pool with the same attributes as @attrs.
+  * On failure, %NULL.
   */
  static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
  {
        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
        copy_workqueue_attrs(pool->attrs, attrs);
  
 +      /*
 +       * no_numa isn't a worker_pool attribute, always clear it.  See
 +       * 'struct workqueue_attrs' comments for detail.
 +       */
 +      pool->attrs->no_numa = false;
 +
        /* if cpumask is contained inside a NUMA node, we belong to that node */
        if (wq_numa_enabled) {
                for_each_node(node) {
@@@ -3812,9 -3799,7 +3832,7 @@@ static void free_unbound_pwq(struct poo
   *
   * Calculate the cpumask a workqueue with @attrs should use on @node.  If
   * @cpu_going_down is >= 0, that cpu is considered offline during
-  * calculation.  The result is stored in @cpumask.  This function returns
-  * %true if the resulting @cpumask is different from @attrs->cpumask,
-  * %false if equal.
+  * calculation.  The result is stored in @cpumask.
   *
   * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
   * enabled and @node has online CPUs requested by @attrs, the returned
   *
   * The caller is responsible for ensuring that the cpumask of @node stays
   * stable.
+  *
+  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+  * %false if equal.
   */
  static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
                                 int cpu_going_down, cpumask_t *cpumask)
@@@ -3876,8 -3864,9 +3897,9 @@@ static struct pool_workqueue *numa_pwq_
   * items finish.  Note that a work item which repeatedly requeues itself
   * back-to-back will stay on its current pwq.
   *
-  * Performs GFP_KERNEL allocations.  Returns 0 on success and -errno on
-  * failure.
+  * Performs GFP_KERNEL allocations.
+  *
+  * Return: 0 on success and -errno on failure.
   */
  int apply_workqueue_attrs(struct workqueue_struct *wq,
                          const struct workqueue_attrs *attrs)
@@@ -4345,6 -4334,8 +4367,8 @@@ EXPORT_SYMBOL_GPL(workqueue_set_max_act
   *
   * Determine whether %current is a workqueue rescuer.  Can be used from
   * work functions to determine whether it's being run off the rescuer task.
+  *
+  * Return: %true if %current is a workqueue rescuer. %false otherwise.
   */
  bool current_is_workqueue_rescuer(void)
  {
   * workqueue being congested on one CPU doesn't mean the workqueue is also
   * congested on other CPUs / NUMA nodes.
   *
-  * RETURNS:
+  * Return:
   * %true if congested, %false otherwise.
   */
  bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@@ -4401,7 -4392,7 +4425,7 @@@ EXPORT_SYMBOL_GPL(workqueue_congested)
   * synchronization around this function and the test result is
   * unreliable and only useful as advisory hints or for debugging.
   *
-  * RETURNS:
+  * Return:
   * OR'd bitmask of WORK_BUSY_* bits.
   */
  unsigned int work_busy(struct work_struct *work)
@@@ -4779,9 -4770,10 +4803,10 @@@ static void work_for_cpu_fn(struct work
   * @fn: the function to run
   * @arg: the function arg
   *
-  * This will return the value @fn returns.
   * It is up to the caller to ensure that the cpu doesn't go offline.
   * The caller must not hold any locks which would prevent @fn from completing.
+  *
+  * Return: The value @fn returns.
   */
  long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
  {
  
        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
        schedule_work_on(cpu, &wfc.work);
 -      flush_work(&wfc.work);
 +
 +      /*
 +       * The work item is on-stack and can't lead to deadlock through
 +       * flushing.  Use __flush_work() to avoid spurious lockdep warnings
 +       * when work_on_cpu()s are nested.
 +       */
 +      __flush_work(&wfc.work);
 +
        return wfc.ret;
  }
  EXPORT_SYMBOL_GPL(work_on_cpu);
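
A hedged usage sketch (the CPU number and callback are illustrative):

        static long probe_fn(void *arg)
        {
                /* runs in a kworker bound to the requested CPU */
                return raw_smp_processor_id();
        }

        long cpu = work_on_cpu(2, probe_fn, NULL);      /* blocks until done */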
@@@ -4853,7 -4838,7 +4878,7 @@@ void freeze_workqueues_begin(void
   * CONTEXT:
   * Grabs and releases wq_pool_mutex.
   *
-  * RETURNS:
+  * Return:
   * %true if some freezable workqueues are still busy.  %false if freezing
   * is complete.
   */
diff --combined mm/memory-failure.c
index 55d7c8026ab078215afff2fe3b088143bcd43f2d,b6fefcf13cbafe17077e88a69087524ef35e0038..d84c5e5331bb5199632f46fda6d3ca3fef9dbaf4
@@@ -1265,7 -1265,7 +1265,7 @@@ void memory_failure_queue(unsigned lon
        if (kfifo_put(&mf_cpu->fifo, &entry))
                schedule_work_on(smp_processor_id(), &mf_cpu->work);
        else
-               pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n",
+               pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
                       pfn);
        spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
        put_cpu_var(memory_failure_cpu);
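
This hunk is part of the treewide "Fix printks with 0x%#" cleanup: the # flag already emits the 0x prefix, so prefixing it again double-prints. For example:

        printk("%#lx\n", 0x2aUL);       /* "0x2a" */
        printk("0x%#lx\n", 0x2aUL);     /* "0x0x2a", the bug fixed here */
        printk("0x%lx\n", 0x2aUL);      /* "0x2a", equivalent alternative */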
@@@ -1286,10 -1286,7 +1286,10 @@@ static void memory_failure_work_func(st
                spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
                if (!gotten)
                        break;
 -              memory_failure(entry.pfn, entry.trapno, entry.flags);
 +              if (entry.flags & MF_SOFT_OFFLINE)
 +                      soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
 +              else
 +                      memory_failure(entry.pfn, entry.trapno, entry.flags);
        }
  }