Merge tag 'modules-for-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 15 Nov 2017 21:46:33 +0000 (13:46 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 15 Nov 2017 21:46:33 +0000 (13:46 -0800)
Pull module updates from Jessica Yu:
 "Summary of modules changes for the 4.15 merge window:

   - treewide module_param_call() cleanup, fix up set/get function
     prototype mismatches, from Kees Cook

   - minor code cleanups"

* tag 'modules-for-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux:
  module: Do not paper over type mismatches in module_param_call()
  treewide: Fix function prototypes for module_param_call()
  module: Prepare to convert all module_param_call() prototypes
  kernel/module: Delete an error message for a failed memory allocation in add_module_usage()
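
The treewide cleanup referenced above amounts to giving every module_param_call() set/get callback the const struct kernel_param * signature that the kernel_param_ops prototypes expect, instead of casting mismatched functions into place. A minimal sketch of the corrected form follows; the parameter name and callbacks are hypothetical, not taken from any of the patches in this pull:

    #include <linux/kernel.h>
    #include <linux/moduleparam.h>

    static int example_threshold;   /* hypothetical backing variable */

    /*
     * Before the cleanup a callback was often declared as
     *   static int set_threshold(const char *val, struct kernel_param *kp)
     * which no longer matches the const-qualified kernel_param_ops prototypes.
     */
    static int set_threshold(const char *val, const struct kernel_param *kp)
    {
            return param_set_int(val, kp);  /* delegate to the stock int handler */
    }

    static int get_threshold(char *buffer, const struct kernel_param *kp)
    {
            return param_get_int(buffer, kp);
    }

    module_param_call(example_threshold, set_threshold, get_threshold,
                      &example_threshold, 0644);

With the const-qualified signatures the compiler reports any remaining mismatch directly, which is what "module: Do not paper over type mismatches in module_param_call()" relies on.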

18 files changed:
drivers/acpi/button.c
drivers/acpi/ec.c
drivers/acpi/sysfs.c
drivers/android/binder.c
drivers/isdn/hardware/mISDN/w6692.c
drivers/md/md.c
drivers/pci/pcie/aspm.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/tty/serial/kgdboc.c
fs/fuse/inode.c
include/linux/moduleparam.h
include/net/netfilter/nf_conntrack.h
kernel/module.c
net/netfilter/nf_conntrack_core.c
net/sunrpc/svc.c
security/apparmor/lsm.c

diff --combined drivers/acpi/button.c
index c391898b483c84067aadc35f8f36fd932d674fb9,891b0921a3077aefebd0014a848608f2ba26cfdb..bf8e4d371fa70bb3962687319771cf0978b2808e
@@@ -390,7 -390,6 +390,7 @@@ static void acpi_button_notify(struct a
  {
        struct acpi_button *button = acpi_driver_data(device);
        struct input_dev *input;
 +      int users;
  
        switch (event) {
        case ACPI_FIXED_HARDWARE_EVENT:
        case ACPI_BUTTON_NOTIFY_STATUS:
                input = button->input;
                if (button->type == ACPI_BUTTON_TYPE_LID) {
 -                      acpi_lid_update_state(device);
 +                      mutex_lock(&button->input->mutex);
 +                      users = button->input->users;
 +                      mutex_unlock(&button->input->mutex);
 +                      if (users)
 +                              acpi_lid_update_state(device);
                } else {
                        int keycode;
  
@@@ -447,24 -442,12 +447,24 @@@ static int acpi_button_resume(struct de
        struct acpi_button *button = acpi_driver_data(device);
  
        button->suspended = false;
 -      if (button->type == ACPI_BUTTON_TYPE_LID)
 +      if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
                acpi_lid_initialize_state(device);
        return 0;
  }
  #endif
  
 +static int acpi_lid_input_open(struct input_dev *input)
 +{
 +      struct acpi_device *device = input_get_drvdata(input);
 +      struct acpi_button *button = acpi_driver_data(device);
 +
 +      button->last_state = !!acpi_lid_evaluate_state(device);
 +      button->last_time = ktime_get();
 +      acpi_lid_initialize_state(device);
 +
 +      return 0;
 +}
 +
  static int acpi_button_add(struct acpi_device *device)
  {
        struct acpi_button *button;
                strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
                sprintf(class, "%s/%s",
                        ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
 -              button->last_state = !!acpi_lid_evaluate_state(device);
 -              button->last_time = ktime_get();
 +              input->open = acpi_lid_input_open;
        } else {
                printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
                error = -ENODEV;
                break;
        }
  
 +      input_set_drvdata(input, device);
        error = input_register_device(input);
        if (error)
                goto err_remove_fs;
        if (button->type == ACPI_BUTTON_TYPE_LID) {
 -              acpi_lid_initialize_state(device);
                /*
                 * This assumes there's only one lid device, or if there are
                 * more we only care about the last one...
@@@ -573,7 -557,8 +573,8 @@@ static int acpi_button_remove(struct ac
        return 0;
  }
  
- static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
+ static int param_set_lid_init_state(const char *val,
+                                   const struct kernel_param *kp)
  {
        int result = 0;
  
        return result;
  }
  
- static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
+ static int param_get_lid_init_state(char *buffer,
+                                   const struct kernel_param *kp)
  {
        switch (lid_init_state) {
        case ACPI_BUTTON_LID_INIT_OPEN:
diff --combined drivers/acpi/ec.c
index 82b3ce5e937e8b980acd1a02e6a1f21c408a5ee0,ba2cebf1bb2f98d3467b99b41f2360fcea1e1673..da176c95aa2cb3a4fd89e7d0b100d0ff7d161405
@@@ -486,11 -486,8 +486,11 @@@ static inline void __acpi_ec_enable_eve
  {
        if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
                ec_log_drv("event unblocked");
 -      if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
 -              advance_transaction(ec);
 +      /*
 +       * Unconditionally invoke this once after enabling the event
 +       * handling mechanism to detect the pending events.
 +       */
 +      advance_transaction(ec);
  }
  
  static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@@ -1459,10 -1456,11 +1459,10 @@@ static int ec_install_handlers(struct a
                        if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
                            ec->reference_count >= 1)
                                acpi_ec_enable_gpe(ec, true);
 -
 -                      /* EC is fully operational, allow queries */
 -                      acpi_ec_enable_event(ec);
                }
        }
 +      /* EC is fully operational, allow queries */
 +      acpi_ec_enable_event(ec);
  
        return 0;
  }
@@@ -1941,7 -1939,8 +1941,8 @@@ static const struct dev_pm_ops acpi_ec_
        SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
  };
  
- static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+ static int param_set_event_clearing(const char *val,
+                                   const struct kernel_param *kp)
  {
        int result = 0;
  
        return result;
  }
  
- static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
+ static int param_get_event_clearing(char *buffer,
+                                   const struct kernel_param *kp)
  {
        switch (ec_event_clearing) {
        case ACPI_EC_EVT_TIMING_STATUS:
diff --combined drivers/acpi/sysfs.c
index bc303df60d5dcebf336d6a7822df02fa2b8cc68a,cf2c1b2b2d6487305ac8f061cbd10e1465fff5da..06a150bb35bf7bb5041d74bcb691c0f9f63ea278
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0
  /*
   * sysfs.c - ACPI sysfs interface to userspace.
   */
@@@ -169,8 -168,7 +169,8 @@@ module_param_cb(debug_level, &param_ops
  
  static char trace_method_name[1024];
  
 -int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
 +static int param_set_trace_method_name(const char *val,
 +                                     const struct kernel_param *kp)
  {
        u32 saved_flags = 0;
        bool is_abs_path = true;
@@@ -231,7 -229,8 +231,8 @@@ module_param_cb(trace_method_name, &par
  module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
  module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
  
- static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ static int param_set_trace_state(const char *val,
+                                const struct kernel_param *kp)
  {
        acpi_status status;
        const char *method = trace_method_name;
        return 0;
  }
  
- static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+ static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
  {
        if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
                return sprintf(buffer, "disable");
@@@ -296,7 -295,8 +297,8 @@@ MODULE_PARM_DESC(aml_debug_output
                 "To enable/disable the ACPI Debug Object output.");
  
  /* /sys/module/acpi/parameters/acpica_version */
- static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
+ static int param_get_acpica_version(char *buffer,
+                                   const struct kernel_param *kp)
  {
        int result;
  
diff --combined drivers/android/binder.c
index fddf76ef5bd6d824e2017fedb9110418b45270de,c45157d71b33a48fd08cb872f6f085f8fe40e067..7255f94ded30594a964154f405611174b071366d
@@@ -150,7 -150,7 +150,7 @@@ static DECLARE_WAIT_QUEUE_HEAD(binder_u
  static int binder_stop_on_user_error;
  
  static int binder_set_stop_on_user_error(const char *val,
-                                        struct kernel_param *kp)
+                                        const struct kernel_param *kp)
  {
        int ret;
  
@@@ -3662,6 -3662,12 +3662,6 @@@ static void binder_stat_br(struct binde
        }
  }
  
 -static int binder_has_thread_work(struct binder_thread *thread)
 -{
 -      return !binder_worklist_empty(thread->proc, &thread->todo) ||
 -              thread->looper_need_return;
 -}
 -
  static int binder_put_node_cmd(struct binder_proc *proc,
                               struct binder_thread *thread,
                               void __user **ptrp,
@@@ -4291,9 -4297,12 +4291,9 @@@ static unsigned int binder_poll(struct 
  
        binder_inner_proc_unlock(thread->proc);
  
 -      if (binder_has_work(thread, wait_for_proc_work))
 -              return POLLIN;
 -
        poll_wait(filp, &thread->wait, wait);
  
 -      if (binder_has_thread_work(thread))
 +      if (binder_has_work(thread, wait_for_proc_work))
                return POLLIN;
  
        return 0;
diff --combined drivers/isdn/hardware/mISDN/w6692.c
index 536d5137f49d6fcba80fe2cbb4aeb684a5ded6ad,209036a4af3a28c23498c9fdbc6c333bac0e33a8..5acf6ab67cd35d51585f25c4e4d20fad1fe2ea2f
@@@ -101,7 -101,7 +101,7 @@@ _set_debug(struct w6692_hw *card
  }
  
  static int
- set_debug(const char *val, struct kernel_param *kp)
+ set_debug(const char *val, const struct kernel_param *kp)
  {
        int ret;
        struct w6692_hw *card;
@@@ -311,6 -311,7 +311,6 @@@ W6692_fill_Dfifo(struct w6692_hw *card
                pr_debug("%s: fill_Dfifo dbusytimer running\n", card->name);
                del_timer(&dch->timer);
        }
 -      init_timer(&dch->timer);
        dch->timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
        add_timer(&dch->timer);
        if (debug & DEBUG_HW_DFIFO) {
@@@ -818,9 -819,8 +818,9 @@@ w6692_irq(int intno, void *dev_id
  }
  
  static void
 -dbusy_timer_handler(struct dchannel *dch)
 +dbusy_timer_handler(struct timer_list *t)
  {
 +      struct dchannel *dch = from_timer(dch, t, timer);
        struct w6692_hw *card = dch->hw;
        int             rbch, star;
        u_long          flags;
@@@ -852,7 -852,8 +852,7 @@@ static void initW6692(struct w6692_hw *
  {
        u8      val;
  
 -      setup_timer(&card->dch.timer, (void *)dbusy_timer_handler,
 -                  (u_long)&card->dch);
 +      timer_setup(&card->dch.timer, dbusy_timer_handler, 0);
        w6692_mode(&card->bc[0], ISDN_P_NONE);
        w6692_mode(&card->bc[1], ISDN_P_NONE);
        WriteW6692(card, W_D_CTL, 0x00);
diff --combined drivers/md/md.c
index 09c3af3dcdca1f83106ec89538d2a599be1f9b9d,276c7ecedf107b7757e238ec5bdb40d9f2336629..c3dc134b9fb5771b805db0d6a9ba02637be0e47f
@@@ -69,7 -69,7 +69,7 @@@
  
  #include <trace/events/block.h>
  #include "md.h"
 -#include "bitmap.h"
 +#include "md-bitmap.h"
  #include "md-cluster.h"
  
  #ifndef MODULE
@@@ -266,31 -266,16 +266,31 @@@ static DEFINE_SPINLOCK(all_mddevs_lock)
   * call has finished, the bio has been linked into some internal structure
   * and so is visible to ->quiesce(), so we don't need the refcount any more.
   */
 +static bool is_suspended(struct mddev *mddev, struct bio *bio)
 +{
 +      if (mddev->suspended)
 +              return true;
 +      if (bio_data_dir(bio) != WRITE)
 +              return false;
 +      if (mddev->suspend_lo >= mddev->suspend_hi)
 +              return false;
 +      if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
 +              return false;
 +      if (bio_end_sector(bio) < mddev->suspend_lo)
 +              return false;
 +      return true;
 +}
 +
  void md_handle_request(struct mddev *mddev, struct bio *bio)
  {
  check_suspended:
        rcu_read_lock();
 -      if (mddev->suspended) {
 +      if (is_suspended(mddev, bio)) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
 -                      if (!mddev->suspended)
 +                      if (!is_suspended(mddev, bio))
                                break;
                        rcu_read_unlock();
                        schedule();
@@@ -359,17 -344,12 +359,17 @@@ static blk_qc_t md_make_request(struct 
  void mddev_suspend(struct mddev *mddev)
  {
        WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
 +      lockdep_assert_held(&mddev->reconfig_mutex);
        if (mddev->suspended++)
                return;
        synchronize_rcu();
        wake_up(&mddev->sb_wait);
 +      set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
 +      smp_mb__after_atomic();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);
 +      clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
 +      wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
  
        del_timer_sync(&mddev->safemode_timer);
  }
@@@ -377,7 -357,6 +377,7 @@@ EXPORT_SYMBOL_GPL(mddev_suspend)
  
  void mddev_resume(struct mddev *mddev)
  {
 +      lockdep_assert_held(&mddev->reconfig_mutex);
        if (--mddev->suspended)
                return;
        wake_up(&mddev->sb_wait);
@@@ -684,7 -663,6 +684,7 @@@ void mddev_unlock(struct mddev *mddev
         */
        spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
 +      wake_up(&mddev->sb_wait);
        spin_unlock(&pers_lock);
  }
  EXPORT_SYMBOL_GPL(mddev_unlock);
@@@ -2335,7 -2313,7 +2335,7 @@@ static void export_array(struct mddev *
  
  static bool set_in_sync(struct mddev *mddev)
  {
 -      WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
 +      lockdep_assert_held(&mddev->lock);
        if (!mddev->in_sync) {
                mddev->sync_checkers++;
                spin_unlock(&mddev->lock);
@@@ -2454,18 -2432,10 +2454,18 @@@ repeat
                }
        }
  
 -      /* First make sure individual recovery_offsets are correct */
 +      /*
 +       * First make sure individual recovery_offsets are correct
 +       * curr_resync_completed can only be used during recovery.
 +       * During reshape/resync it might use array-addresses rather
 +       * that device addresses.
 +       */
        rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk >= 0 &&
                    mddev->delta_disks >= 0 &&
 +                  test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
 +                  test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
 +                  !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
                    !test_bit(Journal, &rdev->flags) &&
                    !test_bit(In_sync, &rdev->flags) &&
                    mddev->curr_resync_completed > rdev->recovery_offset)
@@@ -2681,7 -2651,7 +2681,7 @@@ state_show(struct md_rdev *rdev, char *
  {
        char *sep = ",";
        size_t len = 0;
 -      unsigned long flags = ACCESS_ONCE(rdev->flags);
 +      unsigned long flags = READ_ONCE(rdev->flags);
  
        if (test_bit(Faulty, &flags) ||
            (!test_bit(ExternalBbl, &flags) &&
@@@ -4854,7 -4824,7 +4854,7 @@@ suspend_lo_show(struct mddev *mddev, ch
  static ssize_t
  suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
  {
 -      unsigned long long old, new;
 +      unsigned long long new;
        int err;
  
        err = kstrtoull(buf, 10, &new);
        if (mddev->pers == NULL ||
            mddev->pers->quiesce == NULL)
                goto unlock;
 -      old = mddev->suspend_lo;
 +      mddev_suspend(mddev);
        mddev->suspend_lo = new;
 -      if (new >= old)
 -              /* Shrinking suspended region */
 -              mddev->pers->quiesce(mddev, 2);
 -      else {
 -              /* Expanding suspended region - need to wait */
 -              mddev->pers->quiesce(mddev, 1);
 -              mddev->pers->quiesce(mddev, 0);
 -      }
 +      mddev_resume(mddev);
 +
        err = 0;
  unlock:
        mddev_unlock(mddev);
@@@ -4891,7 -4867,7 +4891,7 @@@ suspend_hi_show(struct mddev *mddev, ch
  static ssize_t
  suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
  {
 -      unsigned long long old, new;
 +      unsigned long long new;
        int err;
  
        err = kstrtoull(buf, 10, &new);
        if (err)
                return err;
        err = -EINVAL;
 -      if (mddev->pers == NULL ||
 -          mddev->pers->quiesce == NULL)
 +      if (mddev->pers == NULL)
                goto unlock;
 -      old = mddev->suspend_hi;
 +
 +      mddev_suspend(mddev);
        mddev->suspend_hi = new;
 -      if (new <= old)
 -              /* Shrinking suspended region */
 -              mddev->pers->quiesce(mddev, 2);
 -      else {
 -              /* Expanding suspended region - need to wait */
 -              mddev->pers->quiesce(mddev, 1);
 -              mddev->pers->quiesce(mddev, 0);
 -      }
 +      mddev_resume(mddev);
 +
        err = 0;
  unlock:
        mddev_unlock(mddev);
@@@ -5375,7 -5357,7 +5375,7 @@@ static struct kobject *md_probe(dev_t d
        return NULL;
  }
  
- static int add_named_array(const char *val, struct kernel_param *kp)
+ static int add_named_array(const char *val, const struct kernel_param *kp)
  {
        /*
         * val must be "md_*" or "mdNNN".
@@@ -5852,14 -5834,8 +5852,14 @@@ void md_stop(struct mddev *mddev
         * This is called from dm-raid
         */
        __md_stop(mddev);
 -      if (mddev->bio_set)
 +      if (mddev->bio_set) {
                bioset_free(mddev->bio_set);
 +              mddev->bio_set = NULL;
 +      }
 +      if (mddev->sync_set) {
 +              bioset_free(mddev->sync_set);
 +              mddev->sync_set = NULL;
 +      }
  }
  
  EXPORT_SYMBOL_GPL(md_stop);
@@@ -6386,7 -6362,7 +6386,7 @@@ static int add_new_disk(struct mddev *m
                                        break;
                                }
                        }
 -                      if (has_journal) {
 +                      if (has_journal || mddev->bitmap) {
                                export_rdev(rdev);
                                return -EBUSY;
                        }
@@@ -6642,26 -6618,22 +6642,26 @@@ static int set_bitmap_file(struct mdde
                return -ENOENT; /* cannot remove what isn't there */
        err = 0;
        if (mddev->pers) {
 -              mddev->pers->quiesce(mddev, 1);
                if (fd >= 0) {
                        struct bitmap *bitmap;
  
                        bitmap = bitmap_create(mddev, -1);
 +                      mddev_suspend(mddev);
                        if (!IS_ERR(bitmap)) {
                                mddev->bitmap = bitmap;
                                err = bitmap_load(mddev);
                        } else
                                err = PTR_ERR(bitmap);
 -              }
 -              if (fd < 0 || err) {
 +                      if (err) {
 +                              bitmap_destroy(mddev);
 +                              fd = -1;
 +                      }
 +                      mddev_resume(mddev);
 +              } else if (fd < 0) {
 +                      mddev_suspend(mddev);
                        bitmap_destroy(mddev);
 -                      fd = -1; /* make sure to put the file */
 +                      mddev_resume(mddev);
                }
 -              mddev->pers->quiesce(mddev, 0);
        }
        if (fd < 0) {
                struct file *f = mddev->bitmap_info.file;
@@@ -6763,7 -6735,7 +6763,7 @@@ static int set_array_info(struct mddev 
  
  void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
  {
 -      WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
 +      lockdep_assert_held(&mddev->reconfig_mutex);
  
        if (mddev->external_size)
                return;
@@@ -6945,8 -6917,8 +6945,8 @@@ static int update_array_info(struct mdd
                                mddev->bitmap_info.default_offset;
                        mddev->bitmap_info.space =
                                mddev->bitmap_info.default_space;
 -                      mddev->pers->quiesce(mddev, 1);
                        bitmap = bitmap_create(mddev, -1);
 +                      mddev_suspend(mddev);
                        if (!IS_ERR(bitmap)) {
                                mddev->bitmap = bitmap;
                                rv = bitmap_load(mddev);
                                rv = PTR_ERR(bitmap);
                        if (rv)
                                bitmap_destroy(mddev);
 -                      mddev->pers->quiesce(mddev, 0);
 +                      mddev_resume(mddev);
                } else {
                        /* remove the bitmap */
                        if (!mddev->bitmap) {
                                mddev->bitmap_info.nodes = 0;
                                md_cluster_ops->leave(mddev);
                        }
 -                      mddev->pers->quiesce(mddev, 1);
 +                      mddev_suspend(mddev);
                        bitmap_destroy(mddev);
 -                      mddev->pers->quiesce(mddev, 0);
 +                      mddev_resume(mddev);
                        mddev->bitmap_info.offset = 0;
                }
        }
@@@ -7496,8 -7468,8 +7496,8 @@@ void md_wakeup_thread(struct md_thread 
  {
        if (thread) {
                pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
 -              if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags))
 -                      wake_up(&thread->wqueue);
 +              set_bit(THREAD_WAKEUP, &thread->flags);
 +              wake_up(&thread->wqueue);
        }
  }
  EXPORT_SYMBOL(md_wakeup_thread);
@@@ -8067,8 -8039,7 +8067,8 @@@ bool md_write_start(struct mddev *mddev
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
        wait_event(mddev->sb_wait,
 -                 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
 +                 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
 +                 mddev->suspended);
        if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                percpu_ref_put(&mddev->writes_pending);
                return false;
@@@ -8139,6 -8110,7 +8139,6 @@@ void md_allow_write(struct mddev *mddev
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                /* wait for the dirty state to be recorded in the metadata */
                wait_event(mddev->sb_wait,
 -                         !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
                           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
        } else
                spin_unlock(&mddev->lock);
@@@ -8505,19 -8477,16 +8505,19 @@@ void md_do_sync(struct md_thread *threa
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
 -                      rcu_read_lock();
 -                      rdev_for_each_rcu(rdev, mddev)
 -                              if (rdev->raid_disk >= 0 &&
 -                                  mddev->delta_disks >= 0 &&
 -                                  !test_bit(Journal, &rdev->flags) &&
 -                                  !test_bit(Faulty, &rdev->flags) &&
 -                                  !test_bit(In_sync, &rdev->flags) &&
 -                                  rdev->recovery_offset < mddev->curr_resync)
 -                                      rdev->recovery_offset = mddev->curr_resync;
 -                      rcu_read_unlock();
 +                      if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 +                          test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
 +                              rcu_read_lock();
 +                              rdev_for_each_rcu(rdev, mddev)
 +                                      if (rdev->raid_disk >= 0 &&
 +                                          mddev->delta_disks >= 0 &&
 +                                          !test_bit(Journal, &rdev->flags) &&
 +                                          !test_bit(Faulty, &rdev->flags) &&
 +                                          !test_bit(In_sync, &rdev->flags) &&
 +                                          rdev->recovery_offset < mddev->curr_resync)
 +                                              rdev->recovery_offset = mddev->curr_resync;
 +                              rcu_read_unlock();
 +                      }
                }
        }
   skip:
@@@ -8844,16 -8813,6 +8844,16 @@@ void md_check_recovery(struct mddev *md
        unlock:
                wake_up(&mddev->sb_wait);
                mddev_unlock(mddev);
 +      } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
 +              /* Write superblock - thread that called mddev_suspend()
 +               * holds reconfig_mutex for us.
 +               */
 +              set_bit(MD_UPDATING_SB, &mddev->flags);
 +              smp_mb__after_atomic();
 +              if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
 +                      md_update_sb(mddev, 0);
 +              clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
 +              wake_up(&mddev->sb_wait);
        }
  }
  EXPORT_SYMBOL(md_check_recovery);
@@@ -9315,11 -9274,11 +9315,11 @@@ static __exit void md_exit(void
  subsys_initcall(md_init);
  module_exit(md_exit)
  
- static int get_ro(char *buffer, struct kernel_param *kp)
+ static int get_ro(char *buffer, const struct kernel_param *kp)
  {
        return sprintf(buffer, "%d", start_readonly);
  }
- static int set_ro(const char *val, struct kernel_param *kp)
+ static int set_ro(const char *val, const struct kernel_param *kp)
  {
        return kstrtouint(val, 10, (unsigned int *)&start_readonly);
  }
diff --combined drivers/pci/pcie/aspm.c
index 83e4a892b14be796700f3c3a81733470ec425704,ca3ee0f7e61dec82f2f7dceac963ecf45b93c62c..af2c0023a1c2405eb5fefbd70109e318af4319f5
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0
  /*
   * File:      drivers/pci/pcie/aspm.c
   * Enabling PCIe link L0s/L1 state and Clock Power Management
@@@ -1061,7 -1060,8 +1061,8 @@@ void pci_disable_link_state(struct pci_
  }
  EXPORT_SYMBOL(pci_disable_link_state);
  
- static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
+ static int pcie_aspm_set_policy(const char *val,
+                               const struct kernel_param *kp)
  {
        int i;
        struct pcie_link_state *link;
        return 0;
  }
  
- static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
+ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
  {
        int i, cnt = 0;
        for (i = 0; i < ARRAY_SIZE(policy_str); i++)
diff --combined drivers/scsi/fcoe/fcoe_transport.c
index 1ba5f51713a3216353991e1d7aa54217f66ab851,c5eb0c468f0b9c5f2004f02f6f87939756105bd7..f4909cd206d3e69afd042cff5865fc32a94c056c
@@@ -32,13 -32,13 +32,13 @@@ MODULE_AUTHOR("Open-FCoE.org")
  MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
  MODULE_LICENSE("GPL v2");
  
- static int fcoe_transport_create(const char *, struct kernel_param *);
- static int fcoe_transport_destroy(const char *, struct kernel_param *);
+ static int fcoe_transport_create(const char *, const struct kernel_param *);
+ static int fcoe_transport_destroy(const char *, const struct kernel_param *);
  static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
  static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
  static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
- static int fcoe_transport_enable(const char *, struct kernel_param *);
- static int fcoe_transport_disable(const char *, struct kernel_param *);
+ static int fcoe_transport_enable(const char *, const struct kernel_param *);
+ static int fcoe_transport_disable(const char *, const struct kernel_param *);
  static int libfcoe_device_notification(struct notifier_block *notifier,
                                    ulong event, void *ptr);
  
@@@ -455,11 -455,9 +455,11 @@@ EXPORT_SYMBOL_GPL(fcoe_check_wait_queue
   *
   * Calls fcoe_check_wait_queue on timeout
   */
 -void fcoe_queue_timer(ulong lport)
 +void fcoe_queue_timer(struct timer_list *t)
  {
 -      fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
 +      struct fcoe_port *port = from_timer(port, t, timer);
 +
 +      fcoe_check_wait_queue(port->lport, NULL);
  }
  EXPORT_SYMBOL_GPL(fcoe_queue_timer);
  
@@@ -867,7 -865,8 +867,8 @@@ EXPORT_SYMBOL(fcoe_ctlr_destroy_store)
   *
   * Returns: 0 for success
   */
- static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+ static int fcoe_transport_create(const char *buffer,
+                                const struct kernel_param *kp)
  {
        int rc = -ENODEV;
        struct net_device *netdev = NULL;
@@@ -932,7 -931,8 +933,8 @@@ out_nodev
   *
   * Returns: 0 for success
   */
- static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+ static int fcoe_transport_destroy(const char *buffer,
+                                 const struct kernel_param *kp)
  {
        int rc = -ENODEV;
        struct net_device *netdev = NULL;
@@@ -976,7 -976,8 +978,8 @@@ out_nodev
   *
   * Returns: 0 for success
   */
- static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+ static int fcoe_transport_disable(const char *buffer,
+                                 const struct kernel_param *kp)
  {
        int rc = -ENODEV;
        struct net_device *netdev = NULL;
@@@ -1010,7 -1011,8 +1013,8 @@@ out_nodev
   *
   * Returns: 0 for success
   */
- static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+ static int fcoe_transport_enable(const char *buffer,
+                                const struct kernel_param *kp)
  {
        int rc = -ENODEV;
        struct net_device *netdev = NULL;
diff --combined drivers/scsi/mpt3sas/mpt3sas_base.c
index a29534c1824ebfc60846ccbc486d75999150ff0d,3d36deee82857e67596e7cd4c094ff02c510f4e9..8027de465d474fc0a9dfe196cfcb41fcc1c0e129
@@@ -59,7 -59,6 +59,7 @@@
  #include <linux/time.h>
  #include <linux/ktime.h>
  #include <linux/kthread.h>
 +#include <asm/page.h>        /* To get host page size per arch */
  #include <linux/aer.h>
  
  
@@@ -106,7 -105,7 +106,7 @@@ _base_get_ioc_facts(struct MPT3SAS_ADAP
   *
   */
  static int
- _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+ _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
  {
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;
@@@ -557,11 -556,6 +557,11 @@@ _base_sas_ioc_info(struct MPT3SAS_ADAPT
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
 +      case MPI2_FUNCTION_NVME_ENCAPSULATED:
 +              frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
 +                  ioc->sge_size;
 +              func_str = "nvme_encapsulated";
 +              break;
        default:
                frame_sz = 32;
                func_str = "unknown";
@@@ -661,27 -655,7 +661,27 @@@ _base_display_event_data(struct MPT3SAS
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
 -              desc = "Active cable exception";
 +              desc = "Cable Event";
 +              break;
 +      case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
 +              desc = "PCIE Device Status Change";
 +              break;
 +      case MPI2_EVENT_PCIE_ENUMERATION:
 +      {
 +              Mpi26EventDataPCIeEnumeration_t *event_data =
 +                      (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
 +              pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
 +                         (event_data->ReasonCode ==
 +                              MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
 +                              "start" : "stop");
 +              if (event_data->EnumerationStatus)
 +                      pr_info("enumeration_status(0x%08x)",
 +                                 le32_to_cpu(event_data->EnumerationStatus));
 +              pr_info("\n");
 +              return;
 +      }
 +      case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
 +              desc = "PCIE Topology Change List";
                break;
        }
  
@@@ -1010,9 -984,7 +1010,9 @@@ _base_interrupt(int irq, void *bus_id
                if (request_desript_type ==
                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
                    request_desript_type ==
 -                  MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
 +                  MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
 +                  request_desript_type ==
 +                  MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
                        cb_idx = _base_get_cb_idx(ioc, smid);
                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
                            (likely(mpt_callbacks[cb_idx] != NULL))) {
@@@ -1374,433 -1346,6 +1374,433 @@@ _base_build_sg(struct MPT3SAS_ADAPTER *
  
  /* IEEE format sgls */
  
 +/**
 + * _base_build_nvme_prp - This function is called for NVMe end devices to build
 + * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 + * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
 + * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
 + * used to describe a larger data buffer.  If the data buffer is too large to
 + * describe using the two PRP entriess inside the NVMe message, then PRP1
 + * describes the first data memory segment, and PRP2 contains a pointer to a PRP
 + * list located elsewhere in memory to describe the remaining data memory
 + * segments.  The PRP list will be contiguous.
 +
 + * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
 + * consists of a list of PRP entries to describe a number of noncontigous
 + * physical memory segments as a single memory buffer, just as a SGL does.  Note
 + * however, that this function is only used by the IOCTL call, so the memory
 + * given will be guaranteed to be contiguous.  There is no need to translate
 + * non-contiguous SGL into a PRP in this case.  All PRPs will describe
 + * contiguous space that is one page size each.
 + *
 + * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
 + * a PRP list pointer or a PRP element, depending upon the command.  PRP2
 + * contains the second PRP element if the memory being described fits within 2
 + * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
 + *
 + * A PRP list pointer contains the address of a PRP list, structured as a linear
 + * array of PRP entries.  Each PRP entry in this list describes a segment of
 + * physical memory.
 + *
 + * Each 64-bit PRP entry comprises an address and an offset field.  The address
 + * always points at the beginning of a 4KB physical memory page, and the offset
 + * describes where within that 4KB page the memory segment begins.  Only the
 + * first element in a PRP list may contain a non-zero offest, implying that all
 + * memory segments following the first begin at the start of a 4KB page.
 + *
 + * Each PRP element normally describes 4KB of physical memory, with exceptions
 + * for the first and last elements in the list.  If the memory being described
 + * by the list begins at a non-zero offset within the first 4KB page, then the
 + * first PRP element will contain a non-zero offset indicating where the region
 + * begins within the 4KB page.  The last memory segment may end before the end
 + * of the 4KB segment, depending upon the overall size of the memory being
 + * described by the PRP list.
 + *
 + * Since PRP entries lack any indication of size, the overall data buffer length
 + * is used to determine where the end of the data memory buffer is located, and
 + * how many PRP entries are required to describe it.
 + *
 + * @ioc: per adapter object
 + * @smid: system request message index for getting asscociated SGL
 + * @nvme_encap_request: the NVMe request msg frame pointer
 + * @data_out_dma: physical address for WRITES
 + * @data_out_sz: data xfer size for WRITES
 + * @data_in_dma: physical address for READS
 + * @data_in_sz: data xfer size for READS
 + *
 + * Returns nothing.
 + */
 +static void
 +_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +      Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
 +      dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
 +      size_t data_in_sz)
 +{
 +      int             prp_size = NVME_PRP_SIZE;
 +      __le64          *prp_entry, *prp1_entry, *prp2_entry;
 +      __le64          *prp_page;
 +      dma_addr_t      prp_entry_dma, prp_page_dma, dma_addr;
 +      u32             offset, entry_len;
 +      u32             page_mask_result, page_mask;
 +      size_t          length;
 +
 +      /*
 +       * Not all commands require a data transfer. If no data, just return
 +       * without constructing any PRP.
 +       */
 +      if (!data_in_sz && !data_out_sz)
 +              return;
 +      /*
 +       * Set pointers to PRP1 and PRP2, which are in the NVMe command.
 +       * PRP1 is located at a 24 byte offset from the start of the NVMe
 +       * command.  Then set the current PRP entry pointer to PRP1.
 +       */
 +      prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
 +          NVME_CMD_PRP1_OFFSET);
 +      prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
 +          NVME_CMD_PRP2_OFFSET);
 +      prp_entry = prp1_entry;
 +      /*
 +       * For the PRP entries, use the specially allocated buffer of
 +       * contiguous memory.
 +       */
 +      prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
 +      prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
 +
 +      /*
 +       * Check if we are within 1 entry of a page boundary we don't
 +       * want our first entry to be a PRP List entry.
 +       */
 +      page_mask = ioc->page_size - 1;
 +      page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
 +      if (!page_mask_result) {
 +              /* Bump up to next page boundary. */
 +              prp_page = (__le64 *)((u8 *)prp_page + prp_size);
 +              prp_page_dma = prp_page_dma + prp_size;
 +      }
 +
 +      /*
 +       * Set PRP physical pointer, which initially points to the current PRP
 +       * DMA memory page.
 +       */
 +      prp_entry_dma = prp_page_dma;
 +
 +      /* Get physical address and length of the data buffer. */
 +      if (data_in_sz) {
 +              dma_addr = data_in_dma;
 +              length = data_in_sz;
 +      } else {
 +              dma_addr = data_out_dma;
 +              length = data_out_sz;
 +      }
 +
 +      /* Loop while the length is not zero. */
 +      while (length) {
 +              /*
 +               * Check if we need to put a list pointer here if we are at
 +               * page boundary - prp_size (8 bytes).
 +               */
 +              page_mask_result = (prp_entry_dma + prp_size) & page_mask;
 +              if (!page_mask_result) {
 +                      /*
 +                       * This is the last entry in a PRP List, so we need to
 +                       * put a PRP list pointer here.  What this does is:
 +                       *   - bump the current memory pointer to the next
 +                       *     address, which will be the next full page.
 +                       *   - set the PRP Entry to point to that page.  This
 +                       *     is now the PRP List pointer.
 +                       *   - bump the PRP Entry pointer the start of the
 +                       *     next page.  Since all of this PRP memory is
 +                       *     contiguous, no need to get a new page - it's
 +                       *     just the next address.
 +                       */
 +                      prp_entry_dma++;
 +                      *prp_entry = cpu_to_le64(prp_entry_dma);
 +                      prp_entry++;
 +              }
 +
 +              /* Need to handle if entry will be part of a page. */
 +              offset = dma_addr & page_mask;
 +              entry_len = ioc->page_size - offset;
 +
 +              if (prp_entry == prp1_entry) {
 +                      /*
 +                       * Must fill in the first PRP pointer (PRP1) before
 +                       * moving on.
 +                       */
 +                      *prp1_entry = cpu_to_le64(dma_addr);
 +
 +                      /*
 +                       * Now point to the second PRP entry within the
 +                       * command (PRP2).
 +                       */
 +                      prp_entry = prp2_entry;
 +              } else if (prp_entry == prp2_entry) {
 +                      /*
 +                       * Should the PRP2 entry be a PRP List pointer or just
 +                       * a regular PRP pointer?  If there is more than one
 +                       * more page of data, must use a PRP List pointer.
 +                       */
 +                      if (length > ioc->page_size) {
 +                              /*
 +                               * PRP2 will contain a PRP List pointer because
 +                               * more PRP's are needed with this command. The
 +                               * list will start at the beginning of the
 +                               * contiguous buffer.
 +                               */
 +                              *prp2_entry = cpu_to_le64(prp_entry_dma);
 +
 +                              /*
 +                               * The next PRP Entry will be the start of the
 +                               * first PRP List.
 +                               */
 +                              prp_entry = prp_page;
 +                      } else {
 +                              /*
 +                               * After this, the PRP Entries are complete.
 +                               * This command uses 2 PRP's and no PRP list.
 +                               */
 +                              *prp2_entry = cpu_to_le64(dma_addr);
 +                      }
 +              } else {
 +                      /*
 +                       * Put entry in list and bump the addresses.
 +                       *
 +                       * After PRP1 and PRP2 are filled in, this will fill in
 +                       * all remaining PRP entries in a PRP List, one per
 +                       * each time through the loop.
 +                       */
 +                      *prp_entry = cpu_to_le64(dma_addr);
 +                      prp_entry++;
 +                      prp_entry_dma++;
 +              }
 +
 +              /*
 +               * Bump the phys address of the command's data buffer by the
 +               * entry_len.
 +               */
 +              dma_addr += entry_len;
 +
 +              /* Decrement length accounting for last partial page. */
 +              if (entry_len > length)
 +                      length = 0;
 +              else
 +                      length -= entry_len;
 +      }
 +}
 +
 +/**
 + * base_make_prp_nvme -
 + * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
 + *
 + * @ioc:              per adapter object
 + * @scmd:             SCSI command from the mid-layer
 + * @mpi_request:      mpi request
 + * @smid:             msg Index
 + * @sge_count:                scatter gather element count.
 + *
 + * Returns:           true: PRPs are built
 + *                    false: IEEE SGLs needs to be built
 + */
 +static void
 +base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
 +              struct scsi_cmnd *scmd,
 +              Mpi25SCSIIORequest_t *mpi_request,
 +              u16 smid, int sge_count)
 +{
 +      int sge_len, num_prp_in_chain = 0;
 +      Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
 +      __le64 *curr_buff;
 +      dma_addr_t msg_dma, sge_addr, offset;
 +      u32 page_mask, page_mask_result;
 +      struct scatterlist *sg_scmd;
 +      u32 first_prp_len;
 +      int data_len = scsi_bufflen(scmd);
 +      u32 nvme_pg_size;
 +
 +      nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
 +      /*
 +       * Nvme has a very convoluted prp format.  One prp is required
 +       * for each page or partial page. Driver need to split up OS sg_list
 +       * entries if it is longer than one page or cross a page
 +       * boundary.  Driver also have to insert a PRP list pointer entry as
 +       * the last entry in each physical page of the PRP list.
 +       *
 +       * NOTE: The first PRP "entry" is actually placed in the first
 +       * SGL entry in the main message as IEEE 64 format.  The 2nd
 +       * entry in the main message is the chain element, and the rest
 +       * of the PRP entries are built in the contiguous pcie buffer.
 +       */
 +      page_mask = nvme_pg_size - 1;
 +
 +      /*
 +       * Native SGL is needed.
 +       * Put a chain element in main message frame that points to the first
 +       * chain buffer.
 +       *
 +       * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
 +       *        a native SGL.
 +       */
 +
 +      /* Set main message chain element pointer */
 +      main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
 +      /*
 +       * For NVMe the chain element needs to be the 2nd SG entry in the main
 +       * message.
 +       */
 +      main_chain_element = (Mpi25IeeeSgeChain64_t *)
 +              ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
 +
 +      /*
 +       * For the PRP entries, use the specially allocated buffer of
 +       * contiguous memory.  Normal chain buffers can't be used
 +       * because each chain buffer would need to be the size of an OS
 +       * page (4k).
 +       */
 +      curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
 +      msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
 +
 +      main_chain_element->Address = cpu_to_le64(msg_dma);
 +      main_chain_element->NextChainOffset = 0;
 +      main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 +                      MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
 +                      MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
 +
 +      /* Build first prp, sge need not to be page aligned*/
 +      ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
 +      sg_scmd = scsi_sglist(scmd);
 +      sge_addr = sg_dma_address(sg_scmd);
 +      sge_len = sg_dma_len(sg_scmd);
 +
 +      offset = sge_addr & page_mask;
 +      first_prp_len = nvme_pg_size - offset;
 +
 +      ptr_first_sgl->Address = cpu_to_le64(sge_addr);
 +      ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
 +
 +      data_len -= first_prp_len;
 +
 +      if (sge_len > first_prp_len) {
 +              sge_addr += first_prp_len;
 +              sge_len -= first_prp_len;
 +      } else if (data_len && (sge_len == first_prp_len)) {
 +              sg_scmd = sg_next(sg_scmd);
 +              sge_addr = sg_dma_address(sg_scmd);
 +              sge_len = sg_dma_len(sg_scmd);
 +      }
 +
 +      for (;;) {
 +              offset = sge_addr & page_mask;
 +
 +              /* Put PRP pointer due to page boundary*/
 +              page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
 +              if (unlikely(!page_mask_result)) {
 +                      scmd_printk(KERN_NOTICE,
 +                              scmd, "page boundary curr_buff: 0x%p\n",
 +                              curr_buff);
 +                      msg_dma += 8;
 +                      *curr_buff = cpu_to_le64(msg_dma);
 +                      curr_buff++;
 +                      num_prp_in_chain++;
 +              }
 +
 +              *curr_buff = cpu_to_le64(sge_addr);
 +              curr_buff++;
 +              msg_dma += 8;
 +              num_prp_in_chain++;
 +
 +              sge_addr += nvme_pg_size;
 +              sge_len -= nvme_pg_size;
 +              data_len -= nvme_pg_size;
 +
 +              if (data_len <= 0)
 +                      break;
 +
 +              if (sge_len > 0)
 +                      continue;
 +
 +              sg_scmd = sg_next(sg_scmd);
 +              sge_addr = sg_dma_address(sg_scmd);
 +              sge_len = sg_dma_len(sg_scmd);
 +      }
 +
 +      main_chain_element->Length =
 +              cpu_to_le32(num_prp_in_chain * sizeof(u64));
 +      return;
 +}
 +
 +static bool
 +base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
 +      struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
 +{
 +      u32 data_length = 0;
 +      struct scatterlist *sg_scmd;
 +      bool build_prp = true;
 +
 +      data_length = scsi_bufflen(scmd);
 +      sg_scmd = scsi_sglist(scmd);
 +
 +      /* If Datalenth is <= 16K and number of SGE’s entries are <= 2
 +       * we built IEEE SGL
 +       */
 +      if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
 +              build_prp = false;
 +
 +      return build_prp;
 +}
 +
 +/**
 + * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
 + * determine if the driver needs to build a native SGL.  If so, that native
 + * SGL is built in the special contiguous buffers allocated especially for
 + * PCIe SGL creation.  If the driver will not build a native SGL, return
 + * TRUE and a normal IEEE SGL will be built.  Currently this routine
 + * supports NVMe.
 + * @ioc: per adapter object
 + * @mpi_request: mf request pointer
 + * @smid: system request message index
 + * @scmd: scsi command
 + * @pcie_device: points to the PCIe device's info
 + *
 + * Returns 0 if native SGL was built, 1 if no SGL was built
 + */
 +static int
 +_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
 +      struct _pcie_device *pcie_device)
 +{
 +      struct scatterlist *sg_scmd;
 +      int sges_left;
 +
 +      /* Get the SG list pointer and info. */
 +      sg_scmd = scsi_sglist(scmd);
 +      sges_left = scsi_dma_map(scmd);
 +      if (sges_left < 0) {
 +              sdev_printk(KERN_ERR, scmd->device,
 +                      "scsi_dma_map failed: request for %d bytes!\n",
 +                      scsi_bufflen(scmd));
 +              return 1;
 +      }
 +
 +      /* Check if we need to build a native SG list. */
 +      if (base_is_prp_possible(ioc, pcie_device,
 +                              scmd, sges_left) == 0) {
 +              /* We built a native SG list, just return. */
 +              goto out;
 +      }
 +
 +      /*
 +       * Build native NVMe PRP.
 +       */
 +      base_make_prp_nvme(ioc, scmd, mpi_request,
 +                      smid, sges_left);
 +
 +      return 0;
 +out:
 +      scsi_dma_unmap(scmd);
 +      return 1;
 +}
 +
  /**
   * _base_add_sg_single_ieee - add sg element for IEEE format
   * @paddr: virtual address for SGE
@@@ -1846,11 -1391,9 +1846,11 @@@ _base_build_zero_len_sge_ieee(struct MP
  
  /**
   * _base_build_sg_scmd - main sg creation routine
 + *            pcie_device is unused here!
   * @ioc: per adapter object
   * @scmd: scsi command
   * @smid: system request message index
 + * @unused: unused pcie_device pointer
   * Context: none.
   *
   * The main routine that builds scatter gather table from a given
   */
  static int
  _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
 -              struct scsi_cmnd *scmd, u16 smid)
 +      struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
  {
        Mpi2SCSIIORequest_t *mpi_request;
        dma_addr_t chain_dma;
   * @ioc: per adapter object
   * @scmd: scsi command
   * @smid: system request message index
 + * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 + * constructed on need.
   * Context: none.
   *
   * The main routine that builds scatter gather table from a given
   */
  static int
  _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
 -      struct scsi_cmnd *scmd, u16 smid)
 +      struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
  {
 -      Mpi2SCSIIORequest_t *mpi_request;
 +      Mpi25SCSIIORequest_t *mpi_request;
        dma_addr_t chain_dma;
        struct scatterlist *sg_scmd;
        void *sg_local, *chain;
        chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
            MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
  
 +      /* Check if we need to build a native SG list. */
 +      if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
 +                      smid, scmd, pcie_device) == 0)) {
 +              /* We built a native SG list, just return. */
 +              return 0;
 +      }
 +
        sg_scmd = scsi_sglist(scmd);
        sges_left = scsi_dma_map(scmd);
        if (sges_left < 0) {
  
        sg_local = &mpi_request->SGL;
        sges_in_segment = (ioc->request_sz -
 -          offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
 +                 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
        if (sges_left <= sges_in_segment)
                goto fill_in_last_segment;
  
        mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
 -          (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
 +          (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
  
        /* fill in main message segment when there is a chain following */
        while (sges_in_segment > 1) {
@@@ -2456,7 -1990,7 +2456,7 @@@ _base_enable_msix(struct MPT3SAS_ADAPTE
          ioc->cpu_count, max_msix_vectors);
  
        if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
 -              local_max_msix_vectors = 8;
 +              local_max_msix_vectors = (reset_devices) ? 1 : 8;
        else
                local_max_msix_vectors = max_msix_vectors;
  
@@@ -2732,32 -2266,6 +2732,32 @@@ mpt3sas_base_get_sense_buffer_dma(struc
            SCSI_SENSE_BUFFERSIZE));
  }
  
 +/**
 + * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
 + * @ioc: per adapter object
 + * @smid: system request message index
 + *
 + * Returns virt pointer to a PCIe SGL.
 + */
 +void *
 +mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +{
 +      return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
 +}
 +
 +/**
 + * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
 + * @ioc: per adapter object
 + * @smid: system request message index
 + *
 + * Returns phys pointer to the address of the PCIe buffer.
 + */
 +dma_addr_t
 +mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +{
 +      return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
 +}
 +
  /**
   * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
   * @ioc: per adapter object
@@@ -3035,30 -2543,6 +3035,30 @@@ _base_put_smid_hi_priority(struct MPT3S
            &ioc->scsi_lookup_lock);
  }
  
 +/**
 + * _base_put_smid_nvme_encap - send NVMe encapsulated request to
 + *  firmware
 + * @ioc: per adapter object
 + * @smid: system request message index
 + *
 + * Return nothing.
 + */
 +static void
 +_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +{
 +      Mpi2RequestDescriptorUnion_t descriptor;
 +      u64 *request = (u64 *)&descriptor;
 +
 +      descriptor.Default.RequestFlags =
 +              MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
 +      descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
 +      descriptor.Default.SMID = cpu_to_le16(smid);
 +      descriptor.Default.LMID = 0;
 +      descriptor.Default.DescriptorTypeDependent = 0;
 +      _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 +          &ioc->scsi_lookup_lock);
 +}
 +
  /**
   * _base_put_smid_default - Default, primarily used for config pages
   * @ioc: per adapter object
@@@ -3149,27 -2633,6 +3149,27 @@@ _base_put_smid_hi_priority_atomic(struc
        writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
  }
  
 +/**
 + * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
 + *   firmware using Atomic Request Descriptor
 + * @ioc: per adapter object
 + * @smid: system request message index
 + *
 + * Return nothing.
 + */
 +static void
 +_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +{
 +      Mpi26AtomicRequestDescriptor_t descriptor;
 +      u32 *request = (u32 *)&descriptor;
 +
 +      descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
 +      descriptor.MSIxIndex = _base_get_msix_index(ioc);
 +      descriptor.SMID = cpu_to_le16(smid);
 +
 +      writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 +}
 +
  /**
   * _base_put_smid_default - Default, primarily used for config pages
   * use Atomic Request Descriptor
@@@ -3482,11 -2945,6 +3482,11 @@@ _base_display_ioc_capabilities(struct M
  
        _base_display_OEMs_branding(ioc);
  
 +      if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
 +              pr_info("%sNVMe", i ? "," : "");
 +              i++;
 +      }
 +
        pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
  
        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@@ -3787,17 -3245,6 +3787,17 @@@ _base_release_memory_pools(struct MPT3S
                kfree(ioc->reply_post);
        }
  
 +      if (ioc->pcie_sgl_dma_pool) {
 +              for (i = 0; i < ioc->scsiio_depth; i++) {
 +                      if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
 +                              pci_pool_free(ioc->pcie_sgl_dma_pool,
 +                              ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
 +                              ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
 +              }
 +              if (ioc->pcie_sgl_dma_pool)
 +                      pci_pool_destroy(ioc->pcie_sgl_dma_pool);
 +      }
 +
        if (ioc->config_page) {
                dexitprintk(ioc, pr_info(MPT3SAS_FMT
                    "config_page(0x%p): free\n", ioc->name,
@@@ -3839,7 -3286,7 +3839,7 @@@ _base_allocate_memory_pools(struct MPT3
        u16 chains_needed_per_io;
        u32 sz, total_sz, reply_post_free_sz;
        u32 retry_sz;
 -      u16 max_request_credit;
 +      u16 max_request_credit, nvme_blocks_needed;
        unsigned short sg_tablesize;
        u16 sge_size;
        int i;
                        sg_tablesize = MPT3SAS_SG_DEPTH;
        }
  
 +      /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
 +      if (reset_devices)
 +              sg_tablesize = min_t(unsigned short, sg_tablesize,
 +                 MPT_KDUMP_MIN_PHYS_SEGMENTS);
 +
        if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
                sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
        else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
                        ioc->internal_depth, facts->RequestCredit);
                if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
                        max_request_credit =  MAX_HBA_QUEUE_DEPTH;
 -      } else
 +      } else if (reset_devices)
 +              max_request_credit = min_t(u16, facts->RequestCredit,
 +                  (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
 +      else
                max_request_credit = min_t(u16, facts->RequestCredit,
                    MAX_HBA_QUEUE_DEPTH);
  
                "internal(0x%p): depth(%d), start smid(%d)\n",
                ioc->name, ioc->internal,
            ioc->internal_depth, ioc->internal_smid));
 +      /*
 +       * The number of NVMe page sized blocks needed is:
 +       *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
 +       * ((sg_tablesize * 8) - 1) is the max PRPs minus the first PRP entry
 +       * that is placed in the main message frame.  8 is the size of each PRP
 +       * entry or PRP list pointer entry.  8 is subtracted from page_size
 +       * because of the PRP list pointer entry at the end of a page, so this
 +       * is not counted as a PRP entry.  The added 1 rounds the result up.
 +       *
 +       * To avoid allocation failures due to the amount of memory that could
 +       * be required for NVMe PRP's, only each set of NVMe blocks will be
 +       * contiguous, so a new set is allocated for each possible I/O.
 +       */
 +      if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
 +              nvme_blocks_needed =
 +                      (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
 +              nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
 +              nvme_blocks_needed++;
 +
 +              sz = nvme_blocks_needed * ioc->page_size;
 +              ioc->pcie_sgl_dma_pool =
 +                      pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
 +              if (!ioc->pcie_sgl_dma_pool) {
 +                      pr_info(MPT3SAS_FMT
 +                          "PCIe SGL pool: pci_pool_create failed\n",
 +                          ioc->name);
 +                      goto out;
 +              }
 +              for (i = 0; i < ioc->scsiio_depth; i++) {
 +                      ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
 +                                      pci_pool_alloc(ioc->pcie_sgl_dma_pool,
 +                                      GFP_KERNEL,
 +                              &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
 +                      if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
 +                              pr_info(MPT3SAS_FMT
 +                                  "PCIe SGL pool: pci_pool_alloc failed\n",
 +                                  ioc->name);
 +                              goto out;
 +                      }
 +              }
  
 +              dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
 +                      "element_size(%d), pool_size(%d kB)\n", ioc->name,
 +                      ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
 +              total_sz += sz * ioc->scsiio_depth;
 +      }
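As a worked example of the sizing comment above, assuming NVME_PRP_SIZE of 8 bytes, a 4 KiB host page and an sg_tablesize of 128 (all assumed values, not taken from this change), a single PRP page per outstanding I/O suffices:

	/* Illustrative arithmetic only; mirrors the calculation above */
	nvme_blocks_needed = (128 * 8) - 1;	/* 1023                            */
	nvme_blocks_needed /= (4096 - 8);	/* 1023 / 4088 = 0                 */
	nvme_blocks_needed++;			/* round up -> 1 PRP page per smid */
	/* sz then becomes one 4 KiB pci_pool element per entry in scsi_lookup */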
        /* sense buffers, 4 byte align */
        sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
        ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@@@ -5052,7 -4446,7 +5052,7 @@@ _base_get_ioc_facts(struct MPT3SAS_ADAP
        if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
                ioc->ir_firmware = 1;
        if ((facts->IOCCapabilities &
 -            MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
 +            MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
                ioc->rdpq_array_capable = 1;
        if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
                ioc->atomic_desc_capable = 1;
            le16_to_cpu(mpi_reply.HighPriorityCredit);
        facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
        facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
 +      facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
 +
 +      /*
 +       * Get the Page Size from IOC Facts. If it's 0, default to 4k.
 +       */
 +      ioc->page_size = 1 << facts->CurrentHostPageSize;
 +      if (ioc->page_size == 1) {
 +              pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
 +                      "default host page size to 4k\n", ioc->name);
 +              ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
 +      }
 +      dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
 +              ioc->name, facts->CurrentHostPageSize));
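For illustration only, with an assumed IOC Facts value that is not taken from this change: CurrentHostPageSize is a power-of-two exponent, so a report of 12 yields a 4 KiB page, while a report of 0 triggers the 4 KiB fallback handled above.

	u8 reported_exp = 12;				/* assumed CurrentHostPageSize      */
	u32 example_page_size = 1 << reported_exp;	/* 4096 bytes                       */

	if (example_page_size == 1)			/* i.e. the IOC reported 0          */
		example_page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;	/* 4 KiB default    */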
  
        dinitprintk(ioc, pr_info(MPT3SAS_FMT
                "hba queue depth(%d), max chains per io(%d)\n",
@@@ -5125,7 -4506,6 +5125,7 @@@ _base_send_ioc_init(struct MPT3SAS_ADAP
        mpi_request.VP_ID = 0;
        mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
        mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
 +      mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
  
        if (_base_is_controller_msix_enabled(ioc))
                mpi_request.HostMSIxVectors = ioc->reply_queue_count;
@@@ -5994,7 -5374,6 +5994,7 @@@ mpt3sas_base_attach(struct MPT3SAS_ADAP
                 */
                ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
                ioc->build_sg = &_base_build_sg_ieee;
 +              ioc->build_nvme_prp = &_base_build_nvme_prp;
                ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
                ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
  
                ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
                ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
                ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
 +              ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
        } else {
                ioc->put_smid_default = &_base_put_smid_default;
                ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
                ioc->put_smid_fast_path = &_base_put_smid_fast_path;
                ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
 +              ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
        }
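A minimal caller sketch, assumed rather than taken from this change, of how the NVMe-encapsulated post is issued through the pointer assigned above, so one call site covers both the atomic and the 64-bit descriptor paths:

	/* Sketch: smid allocation helper and cb_idx are assumptions */
	u16 smid = mpt3sas_base_get_smid(ioc, cb_idx);
	if (smid)
		ioc->put_smid_nvme_encap(ioc, smid);	/* atomic or default variant, per IOC capability */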
  
  
        _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
        _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
        _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
 -      if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
 -              _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
 -
 +      _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
 +      if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
 +              if (ioc->is_gen35_ioc) {
 +                      _base_unmask_events(ioc,
 +                              MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
 +                      _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
 +                      _base_unmask_events(ioc,
 +                              MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
 +              }
 +      }
        r = _base_make_ioc_operational(ioc);
        if (r)
                goto out_free_resources;
index 362f406a285ef9ee3fc4ccb72ad8914ec54f3b53,07719da7ae4aa76586a4184028b7484af05d769a..b258f210120ad98bc060d0a9c44c68c7735bc921
@@@ -60,9 -60,6 +60,9 @@@
  #include "mpt3sas_base.h"
  
  #define RAID_CHANNEL 1
 +
 +#define PCIE_CHANNEL 2
 +
  /* forward proto's */
  static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
        struct _sas_node *sas_expander);
@@@ -72,11 -69,7 +72,11 @@@ static void _scsih_remove_device(struc
        struct _sas_device *sas_device);
  static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        u8 retry_count, u8 is_pd);
 -
 +static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 +static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
 +      struct _pcie_device *pcie_device);
 +static void
 +_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
  static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
  
  /* global parameters */
@@@ -288,7 -281,7 +288,7 @@@ struct _scsi_io_transfer 
   * Note: The logging levels are defined in mpt3sas_debug.h.
   */
  static int
- _scsih_set_debug_level(const char *val, struct kernel_param *kp)
+ _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
  {
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;
@@@ -413,6 -406,11 +413,6 @@@ _scsih_get_sas_address(struct MPT3SAS_A
  
        *sas_address = 0;
  
 -      if (handle <= ioc->sas_hba.num_phys) {
 -              *sas_address = ioc->sas_hba.sas_address;
 -              return 0;
 -      }
 -
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
  
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 -              *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
 +              /* For HBA, vSES doesn't return HBA SAS address. Instead return
 +               * vSES's sas address.
 +               */
 +              if ((handle <= ioc->sas_hba.num_phys) &&
 +                 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
 +                 MPI2_SAS_DEVICE_INFO_SEP)))
 +                      *sas_address = ioc->sas_hba.sas_address;
 +              else
 +                      *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
                return 0;
        }
  
  /**
   * _scsih_determine_boot_device - determine boot device.
   * @ioc: per adapter object
 - * @device: either sas_device or raid_device object
 - * @is_raid: [flag] 1 = raid object, 0 = sas object
 + * @device: sas_device, pcie_device or raid_device object
 + * @channel: SAS or PCIe channel
   *
   * Determines whether this device should be the first device reported to
   * scsi-ml or the sas transport; this is for persistent boot device purposes.
   * There are primary, alternate, and current entries in bios page 2. The order
   * priority is primary, alternate, then current.  This routine saves
 - * the corresponding device object and is_raid flag in the ioc object.
 + * the corresponding device object.
   * The saved data to be used later in _scsih_probe_boot_devices().
   */
  static void
 -_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
 -      void *device, u8 is_raid)
 +_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
 +      u32 channel)
  {
        struct _sas_device *sas_device;
 +      struct _pcie_device *pcie_device;
        struct _raid_device *raid_device;
        u64 sas_address;
        u64 device_name;
        if (!ioc->bios_pg3.BiosVersion)
                return;
  
 -      if (!is_raid) {
 -              sas_device = device;
 -              sas_address = sas_device->sas_address;
 -              device_name = sas_device->device_name;
 -              enclosure_logical_id = sas_device->enclosure_logical_id;
 -              slot = sas_device->slot;
 -      } else {
 +      if (channel == RAID_CHANNEL) {
                raid_device = device;
                sas_address = raid_device->wwid;
                device_name = 0;
                enclosure_logical_id = 0;
                slot = 0;
 +      } else if (channel == PCIE_CHANNEL) {
 +              pcie_device = device;
 +              sas_address = pcie_device->wwid;
 +              device_name = 0;
 +              enclosure_logical_id = 0;
 +              slot = 0;
 +      } else {
 +              sas_device = device;
 +              sas_address = sas_device->sas_address;
 +              device_name = sas_device->device_name;
 +              enclosure_logical_id = sas_device->enclosure_logical_id;
 +              slot = sas_device->slot;
        }
  
        if (!ioc->req_boot_device.device) {
                            ioc->name, __func__,
                            (unsigned long long)sas_address));
                        ioc->req_boot_device.device = device;
 -                      ioc->req_boot_device.is_raid = is_raid;
 +                      ioc->req_boot_device.channel = channel;
                }
        }
  
                            ioc->name, __func__,
                            (unsigned long long)sas_address));
                        ioc->req_alt_boot_device.device = device;
 -                      ioc->req_alt_boot_device.is_raid = is_raid;
 +                      ioc->req_alt_boot_device.channel = channel;
                }
        }
  
                            ioc->name, __func__,
                            (unsigned long long)sas_address));
                        ioc->current_boot_device.device = device;
 -                      ioc->current_boot_device.is_raid = is_raid;
 +                      ioc->current_boot_device.channel = channel;
                }
        }
  }
@@@ -553,7 -536,7 +553,7 @@@ __mpt3sas_get_sdev_from_target(struct M
  
        assert_spin_locked(&ioc->sas_device_lock);
  
 -      ret = tgt_priv->sdev;
 +      ret = tgt_priv->sas_dev;
        if (ret)
                sas_device_get(ret);
  
@@@ -574,44 -557,6 +574,44 @@@ mpt3sas_get_sdev_from_target(struct MPT
        return ret;
  }
  
 +static struct _pcie_device *
 +__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
 +      struct MPT3SAS_TARGET *tgt_priv)
 +{
 +      struct _pcie_device *ret;
 +
 +      assert_spin_locked(&ioc->pcie_device_lock);
 +
 +      ret = tgt_priv->pcie_dev;
 +      if (ret)
 +              pcie_device_get(ret);
 +
 +      return ret;
 +}
 +
 +/**
 + * mpt3sas_get_pdev_from_target - pcie device search
 + * @ioc: per adapter object
 + * @tgt_priv: starget private object
 + *
 + * Context: This function will acquire ioc->pcie_device_lock and will release
 + * it before returning the pcie_device object.
 + *
 + * This searches for the pcie_device from the target, then returns the
 + * pcie_device object.
 + */
 +static struct _pcie_device *
 +mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
 +      struct MPT3SAS_TARGET *tgt_priv)
 +{
 +      struct _pcie_device *ret;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      return ret;
 +}
  
  struct _sas_device *
  __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
@@@ -691,7 -636,7 +691,7 @@@ found_device
   * This searches for sas_device based on the handle, then returns the
   * sas_device object.
   */
 -static struct _sas_device *
 +struct _sas_device *
  mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  {
        struct _sas_device *sas_device;
        return sas_device;
  }
  
 +/**
 + * _scsih_display_enclosure_chassis_info - display device location info
 + * @ioc: per adapter object
 + * @sas_device: per sas device object
 + * @sdev: scsi device struct
 + * @starget: scsi target struct
 + *
 + * Returns nothing.
 + */
 +static void
 +_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
 +      struct _sas_device *sas_device, struct scsi_device *sdev,
 +      struct scsi_target *starget)
 +{
 +      if (sdev) {
 +              if (sas_device->enclosure_handle != 0)
 +                      sdev_printk(KERN_INFO, sdev,
 +                          "enclosure logical id (0x%016llx), slot(%d) \n",
 +                          (unsigned long long)
 +                          sas_device->enclosure_logical_id,
 +                          sas_device->slot);
 +              if (sas_device->connector_name[0] != '\0')
 +                      sdev_printk(KERN_INFO, sdev,
 +                          "enclosure level(0x%04x), connector name( %s)\n",
 +                          sas_device->enclosure_level,
 +                          sas_device->connector_name);
 +              if (sas_device->is_chassis_slot_valid)
 +                      sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
 +                          sas_device->chassis_slot);
 +      } else if (starget) {
 +              if (sas_device->enclosure_handle != 0)
 +                      starget_printk(KERN_INFO, starget,
 +                          "enclosure logical id(0x%016llx), slot(%d) \n",
 +                          (unsigned long long)
 +                          sas_device->enclosure_logical_id,
 +                          sas_device->slot);
 +              if (sas_device->connector_name[0] != '\0')
 +                      starget_printk(KERN_INFO, starget,
 +                          "enclosure level(0x%04x), connector name( %s)\n",
 +                          sas_device->enclosure_level,
 +                          sas_device->connector_name);
 +              if (sas_device->is_chassis_slot_valid)
 +                      starget_printk(KERN_INFO, starget,
 +                          "chassis slot(0x%04x)\n",
 +                          sas_device->chassis_slot);
 +      } else {
 +              if (sas_device->enclosure_handle != 0)
 +                      pr_info(MPT3SAS_FMT
 +                          "enclosure logical id(0x%016llx), slot(%d) \n",
 +                          ioc->name, (unsigned long long)
 +                          sas_device->enclosure_logical_id,
 +                          sas_device->slot);
 +              if (sas_device->connector_name[0] != '\0')
 +                      pr_info(MPT3SAS_FMT
 +                          "enclosure level(0x%04x), connector name( %s)\n",
 +                          ioc->name, sas_device->enclosure_level,
 +                          sas_device->connector_name);
 +              if (sas_device->is_chassis_slot_valid)
 +                      pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
 +                          ioc->name, sas_device->chassis_slot);
 +      }
 +}
 +
  /**
   * _scsih_sas_device_remove - remove sas_device from list.
   * @ioc: per adapter object
@@@ -788,7 -670,17 +788,7 @@@ _scsih_sas_device_remove(struct MPT3SAS
            ioc->name, sas_device->handle,
            (unsigned long long) sas_device->sas_address);
  
 -      if (sas_device->enclosure_handle != 0)
 -              pr_info(MPT3SAS_FMT
 -                 "removing enclosure logical id(0x%016llx), slot(%d)\n",
 -                 ioc->name, (unsigned long long)
 -                 sas_device->enclosure_logical_id, sas_device->slot);
 -
 -      if (sas_device->connector_name[0] != '\0')
 -              pr_info(MPT3SAS_FMT
 -                 "removing enclosure level(0x%04x), connector name( %s)\n",
 -                 ioc->name, sas_device->enclosure_level,
 -                 sas_device->connector_name);
 +      _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
  
        /*
         * The lock serializes access to the list, but we still need to verify
@@@ -880,8 -772,17 +880,8 @@@ _scsih_sas_device_add(struct MPT3SAS_AD
                ioc->name, __func__, sas_device->handle,
                (unsigned long long)sas_device->sas_address));
  
 -      if (sas_device->enclosure_handle != 0)
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: enclosure logical id(0x%016llx), slot( %d)\n",
 -                  ioc->name, __func__, (unsigned long long)
 -                  sas_device->enclosure_logical_id, sas_device->slot));
 -
 -      if (sas_device->connector_name[0] != '\0')
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: enclosure level(0x%04x), connector name( %s)\n",
 -                  ioc->name, __func__,
 -                  sas_device->enclosure_level, sas_device->connector_name));
 +      dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 +          NULL, NULL));
  
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
        sas_device_get(sas_device);
@@@ -931,8 -832,17 +931,8 @@@ _scsih_sas_device_init_add(struct MPT3S
                __func__, sas_device->handle,
                (unsigned long long)sas_device->sas_address));
  
 -      if (sas_device->enclosure_handle != 0)
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: enclosure logical id(0x%016llx), slot( %d)\n",
 -                  ioc->name, __func__, (unsigned long long)
 -                  sas_device->enclosure_logical_id, sas_device->slot));
 -
 -      if (sas_device->connector_name[0] != '\0')
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: enclosure level(0x%04x), connector name( %s)\n",
 -                  ioc->name, __func__, sas_device->enclosure_level,
 -                  sas_device->connector_name));
 +      dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 +          NULL, NULL));
  
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
        sas_device_get(sas_device);
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  }
  
 +
 +static struct _pcie_device *
 +__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
 +{
 +      struct _pcie_device *pcie_device;
 +
 +      assert_spin_locked(&ioc->pcie_device_lock);
 +
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
 +              if (pcie_device->wwid == wwid)
 +                      goto found_device;
 +
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
 +              if (pcie_device->wwid == wwid)
 +                      goto found_device;
 +
 +      return NULL;
 +
 +found_device:
 +      pcie_device_get(pcie_device);
 +      return pcie_device;
 +}
 +
 +
 +/**
 + * mpt3sas_get_pdev_by_wwid - pcie device search
 + * @ioc: per adapter object
 + * @wwid: wwid
 + *
 + * Context: This function will acquire ioc->pcie_device_lock and will release
 + * it before returning the pcie_device object.
 + *
 + * This searches for the pcie_device based on wwid, then returns the
 + * pcie_device object.
 + */
 +static struct _pcie_device *
 +mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
 +{
 +      struct _pcie_device *pcie_device;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      return pcie_device;
 +}
 +
 +
 +static struct _pcie_device *
 +__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
 +      int channel)
 +{
 +      struct _pcie_device *pcie_device;
 +
 +      assert_spin_locked(&ioc->pcie_device_lock);
 +
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
 +              if (pcie_device->id == id && pcie_device->channel == channel)
 +                      goto found_device;
 +
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
 +              if (pcie_device->id == id && pcie_device->channel == channel)
 +                      goto found_device;
 +
 +      return NULL;
 +
 +found_device:
 +      pcie_device_get(pcie_device);
 +      return pcie_device;
 +}
 +
 +static struct _pcie_device *
 +__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 +{
 +      struct _pcie_device *pcie_device;
 +
 +      assert_spin_locked(&ioc->pcie_device_lock);
 +
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
 +              if (pcie_device->handle == handle)
 +                      goto found_device;
 +
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
 +              if (pcie_device->handle == handle)
 +                      goto found_device;
 +
 +      return NULL;
 +
 +found_device:
 +      pcie_device_get(pcie_device);
 +      return pcie_device;
 +}
 +
 +
 +/**
 + * mpt3sas_get_pdev_by_handle - pcie device search
 + * @ioc: per adapter object
 + * @handle: Firmware device handle
 + *
 + * Context: This function will acquire ioc->pcie_device_lock and will release
 + * it before returning the pcie_device object.
 + *
 + * This searches for the pcie_device based on handle, then returns the
 + * pcie_device object.
 + */
 +struct _pcie_device *
 +mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 +{
 +      struct _pcie_device *pcie_device;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      return pcie_device;
 +}
 +
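A brief usage sketch for these lookup helpers, with a hypothetical caller: the unlocked wrapper takes pcie_device_lock itself and hands back a referenced object that the caller must drop with pcie_device_put() after its last use.

	/* Sketch only */
	struct _pcie_device *pcie_device;

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		/* ... use pcie_device->wwid, pcie_device->starget, ... */
		pcie_device_put(pcie_device);
	}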
 +/**
 + * _scsih_pcie_device_remove - remove pcie_device from list.
 + * @ioc: per adapter object
 + * @pcie_device: the pcie_device object
 + * Context: This function will acquire ioc->pcie_device_lock.
 + *
 + * If pcie_device is on the list, remove it and decrement its reference count.
 + */
 +static void
 +_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 +      struct _pcie_device *pcie_device)
 +{
 +      unsigned long flags;
 +      int was_on_pcie_device_list = 0;
 +
 +      if (!pcie_device)
 +              return;
 +      pr_info(MPT3SAS_FMT
 +              "removing handle(0x%04x), wwid(0x%016llx)\n",
 +              ioc->name, pcie_device->handle,
 +              (unsigned long long) pcie_device->wwid);
 +      if (pcie_device->enclosure_handle != 0)
 +              pr_info(MPT3SAS_FMT
 +                      "removing enclosure logical id(0x%016llx), slot(%d)\n",
 +                      ioc->name,
 +                      (unsigned long long)pcie_device->enclosure_logical_id,
 +              pcie_device->slot);
 +      if (pcie_device->connector_name[0] != '\0')
 +              pr_info(MPT3SAS_FMT
 +                      "removing enclosure level(0x%04x), connector name( %s)\n",
 +                      ioc->name, pcie_device->enclosure_level,
 +                      pcie_device->connector_name);
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      if (!list_empty(&pcie_device->list)) {
 +              list_del_init(&pcie_device->list);
 +              was_on_pcie_device_list = 1;
 +      }
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +      if (was_on_pcie_device_list) {
 +              kfree(pcie_device->serial_number);
 +              pcie_device_put(pcie_device);
 +      }
 +}
 +
 +
 +/**
 + * _scsih_pcie_device_remove_by_handle - remove pcie device object by handle
 + * @ioc: per adapter object
 + * @handle: device handle
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 +{
 +      struct _pcie_device *pcie_device;
 +      unsigned long flags;
 +      int was_on_pcie_device_list = 0;
 +
 +      if (ioc->shost_recovery)
 +              return;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
 +      if (pcie_device) {
 +              if (!list_empty(&pcie_device->list)) {
 +                      list_del_init(&pcie_device->list);
 +                      was_on_pcie_device_list = 1;
 +                      pcie_device_put(pcie_device);
 +              }
 +      }
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +      if (was_on_pcie_device_list) {
 +              _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
 +              pcie_device_put(pcie_device);
 +      }
 +}
 +
 +/**
 + * _scsih_pcie_device_add - add pcie_device object
 + * @ioc: per adapter object
 + * @pcie_device: pcie_device object
 + *
 + * This is added to the pcie_device_list linked list.
 + */
 +static void
 +_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
 +      struct _pcie_device *pcie_device)
 +{
 +      unsigned long flags;
 +
 +      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +              "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
 +              pcie_device->handle, (unsigned long long)pcie_device->wwid));
 +      if (pcie_device->enclosure_handle != 0)
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                      "%s: enclosure logical id(0x%016llx), slot( %d)\n",
 +                      ioc->name, __func__,
 +                      (unsigned long long)pcie_device->enclosure_logical_id,
 +                      pcie_device->slot));
 +      if (pcie_device->connector_name[0] != '\0')
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                      "%s: enclosure level(0x%04x), connector name( %s)\n",
 +                      ioc->name, __func__, pcie_device->enclosure_level,
 +                      pcie_device->connector_name));
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      pcie_device_get(pcie_device);
 +      list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
 +              _scsih_pcie_device_remove(ioc, pcie_device);
 +      } else if (!pcie_device->starget) {
 +              if (!ioc->is_driver_loading) {
 +/*TODO-- Need to find out whether this condition will occur or not*/
 +                      clear_bit(pcie_device->handle, ioc->pend_os_device_add);
 +              }
 +      } else
 +              clear_bit(pcie_device->handle, ioc->pend_os_device_add);
 +}
 +
 +/*
 + * _scsih_pcie_device_init_add - insert pcie_device into the init list.
 + * @ioc: per adapter object
 + * @pcie_device: the pcie_device object
 + * Context: This function will acquire ioc->pcie_device_lock.
 + *
 + * Adding new object at driver load time to the ioc->pcie_device_init_list.
 + */
 +static void
 +_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
 +                              struct _pcie_device *pcie_device)
 +{
 +      unsigned long flags;
 +
 +      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +              "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
 +              pcie_device->handle, (unsigned long long)pcie_device->wwid));
 +      if (pcie_device->enclosure_handle != 0)
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                      "%s: enclosure logical id(0x%016llx), slot( %d)\n",
 +                      ioc->name, __func__,
 +                      (unsigned long long)pcie_device->enclosure_logical_id,
 +                      pcie_device->slot));
 +      if (pcie_device->connector_name[0] != '\0')
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                      "%s: enclosure level(0x%04x), connector name( %s)\n",
 +                      ioc->name, __func__, pcie_device->enclosure_level,
 +                      pcie_device->connector_name));
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      pcie_device_get(pcie_device);
 +      list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
 +      _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +}
  /**
   * _scsih_raid_device_find_by_id - raid device search
   * @ioc: per adapter object
@@@ -1427,23 -1061,6 +1427,23 @@@ _scsih_is_end_device(u32 device_info
                return 0;
  }
  
 +/**
 + * _scsih_is_nvme_device - determines if device is an nvme device
 + * @device_info: bitfield providing information about the device.
 + * Context: none
 + *
 + * Returns 1 if nvme device.
 + */
 +static int
 +_scsih_is_nvme_device(u32 device_info)
 +{
 +      if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
 +                                      == MPI26_PCIE_DEVINFO_NVME)
 +              return 1;
 +      else
 +              return 0;
 +}
 +
  /**
   * _scsih_scsi_lookup_get - returns scmd entry
   * @ioc: per adapter object
@@@ -1661,7 -1278,6 +1661,7 @@@ scsih_target_alloc(struct scsi_target *
        struct MPT3SAS_TARGET *sas_target_priv_data;
        struct _sas_device *sas_device;
        struct _raid_device *raid_device;
 +      struct _pcie_device *pcie_device;
        unsigned long flags;
        struct sas_rphy *rphy;
  
                return 0;
        }
  
 +      /* PCIe devices */
 +      if (starget->channel == PCIE_CHANNEL) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
 +                      starget->channel);
 +              if (pcie_device) {
 +                      sas_target_priv_data->handle = pcie_device->handle;
 +                      sas_target_priv_data->sas_address = pcie_device->wwid;
 +                      sas_target_priv_data->pcie_dev = pcie_device;
 +                      pcie_device->starget = starget;
 +                      pcie_device->id = starget->id;
 +                      pcie_device->channel = starget->channel;
 +                      sas_target_priv_data->flags |=
 +                              MPT_TARGET_FLAGS_PCIE_DEVICE;
 +                      if (pcie_device->fast_path)
 +                              sas_target_priv_data->flags |=
 +                                      MPT_TARGET_FASTPATH_IO;
 +              }
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              return 0;
 +      }
 +
        /* sas/sata devices */
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
        rphy = dev_to_rphy(starget->dev.parent);
        if (sas_device) {
                sas_target_priv_data->handle = sas_device->handle;
                sas_target_priv_data->sas_address = sas_device->sas_address;
 -              sas_target_priv_data->sdev = sas_device;
 +              sas_target_priv_data->sas_dev = sas_device;
                sas_device->starget = starget;
                sas_device->id = starget->id;
                sas_device->channel = starget->channel;
                        sas_target_priv_data->flags |=
                            MPT_TARGET_FLAGS_RAID_COMPONENT;
                if (sas_device->fast_path)
 -                      sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
 +                      sas_target_priv_data->flags |=
 +                                      MPT_TARGET_FASTPATH_IO;
        }
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  
@@@ -1752,9 -1345,7 +1752,9 @@@ scsih_target_destroy(struct scsi_targe
        struct MPT3SAS_TARGET *sas_target_priv_data;
        struct _sas_device *sas_device;
        struct _raid_device *raid_device;
 +      struct _pcie_device *pcie_device;
        unsigned long flags;
 +      struct sas_rphy *rphy;
  
        sas_target_priv_data = starget->hostdata;
        if (!sas_target_priv_data)
                goto out;
        }
  
 +      if (starget->channel == PCIE_CHANNEL) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_from_target(ioc,
 +                                                      sas_target_priv_data);
 +              if (pcie_device && (pcie_device->starget == starget) &&
 +                      (pcie_device->id == starget->id) &&
 +                      (pcie_device->channel == starget->channel))
 +                      pcie_device->starget = NULL;
 +
 +              if (pcie_device) {
 +                      /*
 +                       * Corresponding get() is in _scsih_target_alloc()
 +                       */
 +                      sas_target_priv_data->pcie_dev = NULL;
 +                      pcie_device_put(pcie_device);
 +                      pcie_device_put(pcie_device);
 +              }
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              goto out;
 +      }
 +
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
 +      rphy = dev_to_rphy(starget->dev.parent);
        sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
        if (sas_device && (sas_device->starget == starget) &&
            (sas_device->id == starget->id) &&
                /*
                 * Corresponding get() is in _scsih_target_alloc()
                 */
 -              sas_target_priv_data->sdev = NULL;
 +              sas_target_priv_data->sas_dev = NULL;
                sas_device_put(sas_device);
  
                sas_device_put(sas_device);
@@@ -1834,7 -1403,6 +1834,7 @@@ scsih_slave_alloc(struct scsi_device *s
        struct scsi_target *starget;
        struct _raid_device *raid_device;
        struct _sas_device *sas_device;
 +      struct _pcie_device *pcie_device;
        unsigned long flags;
  
        sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
                        raid_device->sdev = sdev; /* raid is single lun */
                spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
        }
 +      if (starget->channel == PCIE_CHANNEL) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
 +                              sas_target_priv_data->sas_address);
 +              if (pcie_device && (pcie_device->starget == NULL)) {
 +                      sdev_printk(KERN_INFO, sdev,
 +                          "%s : pcie_device->starget set to starget @ %d\n",
 +                          __func__, __LINE__);
 +                      pcie_device->starget = starget;
 +              }
  
 -      if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
 +              if (pcie_device)
 +                      pcie_device_put(pcie_device);
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
                spin_lock_irqsave(&ioc->sas_device_lock, flags);
                sas_device = __mpt3sas_get_sdev_by_addr(ioc,
                                        sas_target_priv_data->sas_address);
@@@ -1912,7 -1466,6 +1912,7 @@@ scsih_slave_destroy(struct scsi_device 
        struct Scsi_Host *shost;
        struct MPT3SAS_ADAPTER *ioc;
        struct _sas_device *sas_device;
 +      struct _pcie_device *pcie_device;
        unsigned long flags;
  
        if (!sdev->hostdata)
        shost = dev_to_shost(&starget->dev);
        ioc = shost_priv(shost);
  
 -      if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
 +      if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_from_target(ioc,
 +                              sas_target_priv_data);
 +              if (pcie_device && !sas_target_priv_data->num_luns)
 +                      pcie_device->starget = NULL;
 +
 +              if (pcie_device)
 +                      pcie_device_put(pcie_device);
 +
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
                spin_lock_irqsave(&ioc->sas_device_lock, flags);
                sas_device = __mpt3sas_get_sdev_from_target(ioc,
                                sas_target_priv_data);
@@@ -2021,14 -1562,6 +2021,14 @@@ scsih_is_raid(struct device *dev
        return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
  }
  
 +static int
 +scsih_is_nvme(struct device *dev)
 +{
 +      struct scsi_device *sdev = to_scsi_device(dev);
 +
 +      return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
 +}
 +
  /**
   * scsih_get_resync - get raid volume resync percent complete
   * @dev the device struct object
@@@ -2304,7 -1837,6 +2304,7 @@@ scsih_slave_configure(struct scsi_devic
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct MPT3SAS_TARGET *sas_target_priv_data;
        struct _sas_device *sas_device;
 +      struct _pcie_device *pcie_device;
        struct _raid_device *raid_device;
        unsigned long flags;
        int qdepth;
                }
        }
  
 -      spin_lock_irqsave(&ioc->sas_device_lock, flags);
 -      sas_device = __mpt3sas_get_sdev_by_addr(ioc,
 -         sas_device_priv_data->sas_target->sas_address);
 +      /* PCIe handling */
 +      if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
 +                              sas_device_priv_data->sas_target->sas_address);
 +              if (!pcie_device) {
 +                      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +                      dfailprintk(ioc, pr_warn(MPT3SAS_FMT
 +                              "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
 +                              __LINE__, __func__));
 +                      return 1;
 +              }
 +
 +              qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
 +              ds = "NVMe";
 +              sdev_printk(KERN_INFO, sdev,
 +                      "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
 +                      ds, handle, (unsigned long long)pcie_device->wwid,
 +                      pcie_device->port_num);
 +              if (pcie_device->enclosure_handle != 0)
 +                      sdev_printk(KERN_INFO, sdev,
 +                      "%s: enclosure logical id(0x%016llx), slot(%d)\n",
 +                      ds,
 +                      (unsigned long long)pcie_device->enclosure_logical_id,
 +                      pcie_device->slot);
 +              if (pcie_device->connector_name[0] != '\0')
 +                      sdev_printk(KERN_INFO, sdev,
 +                              "%s: enclosure level(0x%04x), "
 +                              "connector name( %s)\n", ds,
 +                              pcie_device->enclosure_level,
 +                              pcie_device->connector_name);
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              scsih_change_queue_depth(sdev, qdepth);
 +
 +              if (pcie_device->nvme_mdts)
 +                      blk_queue_max_hw_sectors(sdev->request_queue,
 +                                      pcie_device->nvme_mdts/512);
 +              /* done with the lookup reference taken above */
 +              pcie_device_put(pcie_device);
 +              /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
 +               ** merged and can eliminate holes created during merging
 +               ** operation.
 +               **/
 +              queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
 +                              sdev->request_queue);
 +              blk_queue_virt_boundary(sdev->request_queue,
 +                              ioc->page_size - 1);
 +              return 0;
 +      }
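As a rough illustration of the two block-layer limits applied above (the MDTS value below is an assumption, not taken from this change): a device MDTS of 128 KiB caps transfers at 256 sectors, and the virt boundary mask of page_size - 1 keeps every scatter segment inside one PRP page.

	/* Sketch with assumed values: nvme_mdts = 131072, ioc->page_size = 4096 */
	unsigned int max_hw_sectors = 131072 / 512;	/* 256 x 512-byte sectors       */
	unsigned long virt_boundary = 4096 - 1;		/* 0xfff segment-boundary mask  */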
 +
 +      spin_lock_irqsave(&ioc->sas_device_lock, flags);
 +      sas_device = __mpt3sas_get_sdev_by_addr(ioc,
 +         sas_device_priv_data->sas_target->sas_address);
        if (!sas_device) {
                spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
                dfailprintk(ioc, pr_warn(MPT3SAS_FMT
            "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
            ds, handle, (unsigned long long)sas_device->sas_address,
            sas_device->phy, (unsigned long long)sas_device->device_name);
 -      if (sas_device->enclosure_handle != 0)
 -              sdev_printk(KERN_INFO, sdev,
 -                   "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
 -                   ds, (unsigned long long)
 -                   sas_device->enclosure_logical_id, sas_device->slot);
 -      if (sas_device->connector_name[0] != '\0')
 -              sdev_printk(KERN_INFO, sdev,
 -                   "%s: enclosure level(0x%04x), connector name( %s)\n",
 -                   ds, sas_device->enclosure_level,
 -                   sas_device->connector_name);
 +
 +      _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
  
        sas_device_put(sas_device);
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@@ -2909,7 -2400,6 +2909,7 @@@ _scsih_tm_display_info(struct MPT3SAS_A
        struct scsi_target *starget = scmd->device->sdev_target;
        struct MPT3SAS_TARGET *priv_target = starget->hostdata;
        struct _sas_device *sas_device = NULL;
 +      struct _pcie_device *pcie_device = NULL;
        unsigned long flags;
        char *device_str = NULL;
  
                        "%s handle(0x%04x), %s wwid(0x%016llx)\n",
                        device_str, priv_target->handle,
                    device_str, (unsigned long long)priv_target->sas_address);
 +
 +      } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
 +              if (pcie_device) {
 +                      starget_printk(KERN_INFO, starget,
 +                              "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
 +                              pcie_device->handle,
 +                              (unsigned long long)pcie_device->wwid,
 +                              pcie_device->port_num);
 +                      if (pcie_device->enclosure_handle != 0)
 +                              starget_printk(KERN_INFO, starget,
 +                                      "enclosure logical id(0x%016llx), slot(%d)\n",
 +                                      (unsigned long long)
 +                                      pcie_device->enclosure_logical_id,
 +                                      pcie_device->slot);
 +                      if (pcie_device->connector_name[0] != '\0')
 +                              starget_printk(KERN_INFO, starget,
 +                                      "enclosure level(0x%04x), connector name( %s)\n",
 +                                      pcie_device->enclosure_level,
 +                                      pcie_device->connector_name);
 +                      pcie_device_put(pcie_device);
 +              }
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
        } else {
                spin_lock_irqsave(&ioc->sas_device_lock, flags);
                sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
                            sas_device->handle,
                            (unsigned long long)sas_device->sas_address,
                            sas_device->phy);
 -                      if (sas_device->enclosure_handle != 0)
 -                              starget_printk(KERN_INFO, starget,
 -                               "enclosure_logical_id(0x%016llx), slot(%d)\n",
 -                               (unsigned long long)
 -                               sas_device->enclosure_logical_id,
 -                               sas_device->slot);
 -                      if (sas_device->connector_name[0] != '\0')
 -                              starget_printk(KERN_INFO, starget,
 -                              "enclosure level(0x%04x),connector name(%s)\n",
 -                               sas_device->enclosure_level,
 -                               sas_device->connector_name);
 +
 +                      _scsih_display_enclosure_chassis_info(NULL, sas_device,
 +                          NULL, starget);
  
                        sas_device_put(sas_device);
                }
@@@ -3534,6 -3007,8 +3534,6 @@@ _scsih_block_io_device(struct MPT3SAS_A
        struct _sas_device *sas_device;
  
        sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
 -      if (!sas_device)
 -              return;
  
        shost_for_each_device(sdev, ioc->shost) {
                sas_device_priv_data = sdev->hostdata;
                        continue;
                if (sas_device_priv_data->block)
                        continue;
 -              if (sas_device->pend_sas_rphy_add)
 +              if (sas_device && sas_device->pend_sas_rphy_add)
                        continue;
                if (sas_device_priv_data->ignore_delay_remove) {
                        sdev_printk(KERN_INFO, sdev,
                _scsih_internal_device_block(sdev, sas_device_priv_data);
        }
  
 -      sas_device_put(sas_device);
 +      if (sas_device)
 +              sas_device_put(sas_device);
  }
  
  /**
@@@ -3638,33 -3112,6 +3638,33 @@@ _scsih_block_io_to_children_attached_di
        }
  }
  
 +/**
 + * _scsih_block_io_to_pcie_children_attached_directly
 + * @ioc: per adapter object
 + * @event_data: topology change event data
 + *
 + * This routine sets the sdev state to SDEV_BLOCK for all devices
 + * directly attached during device pull/reconnect.
 + */
 +static void
 +_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
 +              Mpi26EventDataPCIeTopologyChangeList_t *event_data)
 +{
 +      int i;
 +      u16 handle;
 +      u16 reason_code;
 +
 +      for (i = 0; i < event_data->NumEntries; i++) {
 +              handle =
 +                      le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
 +              if (!handle)
 +                      continue;
 +              reason_code = event_data->PortEntry[i].PortStatus;
 +              if (reason_code ==
 +                              MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
 +                      _scsih_block_io_device(ioc, handle);
 +      }
 +}
  /**
   * _scsih_tm_tr_send - send task management request
   * @ioc: per adapter object
@@@ -3686,14 -3133,18 +3686,14 @@@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTE
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        u16 smid;
        struct _sas_device *sas_device = NULL;
 +      struct _pcie_device *pcie_device = NULL;
        struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
        u64 sas_address = 0;
        unsigned long flags;
        struct _tr_list *delayed_tr;
        u32 ioc_state;
  
 -      if (ioc->remove_host) {
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                      "%s: host has been removed: handle(0x%04x)\n",
 -                      __func__, ioc->name, handle));
 -              return;
 -      } else if (ioc->pci_error_recovery) {
 +      if (ioc->pci_error_recovery) {
                dewtprintk(ioc, pr_info(MPT3SAS_FMT
                        "%s: host in pci error recovery: handle(0x%04x)\n",
                        __func__, ioc->name,
                sas_address = sas_device->sas_address;
        }
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 -
 +      if (!sas_device) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
 +              if (pcie_device && pcie_device->starget &&
 +                      pcie_device->starget->hostdata) {
 +                      sas_target_priv_data = pcie_device->starget->hostdata;
 +                      sas_target_priv_data->deleted = 1;
 +                      sas_address = pcie_device->wwid;
 +              }
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +      }
        if (sas_target_priv_data) {
                dewtprintk(ioc, pr_info(MPT3SAS_FMT
                        "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
                        ioc->name, handle,
                    (unsigned long long)sas_address));
 -              if (sas_device->enclosure_handle != 0)
 -                      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                       "setting delete flag:enclosure logical id(0x%016llx),"
 -                       " slot(%d)\n", ioc->name, (unsigned long long)
 -                        sas_device->enclosure_logical_id,
 -                        sas_device->slot));
 -              if (sas_device->connector_name[0] != '\0')
 -                      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                       "setting delete flag: enclosure level(0x%04x),"
 -                       " connector name( %s)\n", ioc->name,
 -                        sas_device->enclosure_level,
 -                        sas_device->connector_name));
 +              if (sas_device) {
 +                      if (sas_device->enclosure_handle != 0)
 +                              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                                  "setting delete flag: enclosure logical "
 +                                  "id(0x%016llx), slot(%d)\n", ioc->name,
 +                                  (unsigned long long)
 +                                  sas_device->enclosure_logical_id,
 +                                  sas_device->slot));
 +                      if (sas_device->connector_name[0] != '\0')
 +                              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                                  "setting delete flag: enclosure "
 +                                  "level(0x%04x), connector name( %s)\n",
 +                                  ioc->name, sas_device->enclosure_level,
 +                                  sas_device->connector_name));
 +              } else if (pcie_device) {
 +                      if (pcie_device->enclosure_handle != 0)
 +                              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                                  "setting delete flag: enclosure logical "
 +                                  "id(0x%016llx), slot(%d)\n", ioc->name,
 +                                  (unsigned long long)
 +                                  pcie_device->enclosure_logical_id,
 +                                  pcie_device->slot));
 +                      if (pcie_device->connector_name[0] != '\0')
 +                              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                                  "setting delete flag: enclosure "
 +                                  "level(0x%04x), "
 +                                  "connector name( %s)\n", ioc->name,
 +                                  pcie_device->enclosure_level,
 +                                  pcie_device->connector_name));
 +              }
                _scsih_ublock_io_device(ioc, sas_address);
                sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
        }
  out:
        if (sas_device)
                sas_device_put(sas_device);
 +      if (pcie_device)
 +              pcie_device_put(pcie_device);
  }
  
  /**
@@@ -4309,81 -3730,6 +4309,81 @@@ _scsih_check_topo_delete_events(struct 
        spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  }
  
 +/**
 + * _scsih_check_pcie_topo_remove_events - sanity check on topo
 + * events
 + * @ioc: per adapter object
 + * @event_data: the event data payload
 + *
 + * This handles the case where the driver receives multiple switch
 + * or device add and delete events in a single shot.  When there
 + * is a delete event, the routine voids any pending add
 + * events still waiting in the event queue.
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi26EventDataPCIeTopologyChangeList_t *event_data)
 +{
 +      struct fw_event_work *fw_event;
 +      Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
 +      unsigned long flags;
 +      int i, reason_code;
 +      u16 handle, switch_handle;
 +
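 +      /* send target resets for devices reported as not responding */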
 +      for (i = 0; i < event_data->NumEntries; i++) {
 +              handle =
 +                      le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
 +              if (!handle)
 +                      continue;
 +              reason_code = event_data->PortEntry[i].PortStatus;
 +              if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
 +                      _scsih_tm_tr_send(ioc, handle);
 +      }
 +
 +      switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
 +      if (!switch_handle) {
 +              _scsih_block_io_to_pcie_children_attached_directly(
 +                                                      ioc, event_data);
 +              return;
 +      }
 +      /* TODO: We are not supporting cascaded PCIe switch removal yet */
 +      if ((event_data->SwitchStatus
 +              == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
 +              (event_data->SwitchStatus ==
 +                                      MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
 +              _scsih_block_io_to_pcie_children_attached_directly(
 +                                                      ioc, event_data);
 +
 +      if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
 +              return;
 +
 +      /* mark ignore flag for pending events */
 +      spin_lock_irqsave(&ioc->fw_event_lock, flags);
 +      list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
 +              if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
 +                      fw_event->ignore)
 +                      continue;
 +              local_event_data =
 +                      (Mpi26EventDataPCIeTopologyChangeList_t *)
 +                      fw_event->event_data;
 +              if (local_event_data->SwitchStatus ==
 +                  MPI2_EVENT_SAS_TOPO_ES_ADDED ||
 +                  local_event_data->SwitchStatus ==
 +                  MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
 +                      if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
 +                              switch_handle) {
 +                              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                                      "setting ignoring flag for switch event\n",
 +                                      ioc->name));
 +                              fw_event->ignore = 1;
 +                      }
 +              }
 +      }
 +      spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 +}
 +
  /**
   * _scsih_set_volume_delete_flag - setting volume delete flag
   * @ioc: per adapter object
@@@ -4633,7 -3979,7 +4633,7 @@@ _scsih_flush_running_cmds(struct MPT3SA
   */
  static void
  _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 -      Mpi2SCSIIORequest_t *mpi_request)
 +      Mpi25SCSIIORequest_t *mpi_request)
  {
        u16 eedp_flags;
        unsigned char prot_op = scsi_get_prot_op(scmd);
@@@ -4736,8 -4082,7 +4736,8 @@@ scsih_qcmd(struct Scsi_Host *shost, str
        struct _raid_device *raid_device;
        struct request *rq = scmd->request;
        int class;
 -      Mpi2SCSIIORequest_t *mpi_request;
 +      Mpi25SCSIIORequest_t *mpi_request;
 +      struct _pcie_device *pcie_device = NULL;
        u32 mpi_control;
        u16 smid;
        u16 handle;
        /* Make sure Device is not raid volume.
         * We do not expose raid functionality to upper layer for warpdrive.
         */
 -      if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
 -          && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
 +      if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
 +              && !scsih_is_nvme(&scmd->device->sdev_gendev))
 +              && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
                mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
  
        smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
                goto out;
        }
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 -      memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
 +      memset(mpi_request, 0, ioc->request_sz);
        _scsih_setup_eedp(ioc, scmd, mpi_request);
  
        if (scmd->cmd_len == 32)
        mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
        mpi_request->SenseBufferLowAddress =
            mpt3sas_base_get_sense_buffer_dma(ioc, smid);
 -      mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
 +      mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
        int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
            mpi_request->LUN);
        memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
  
        if (mpi_request->DataLength) {
 -              if (ioc->build_sg_scmd(ioc, scmd, smid)) {
 +              pcie_device = sas_target_priv_data->pcie_dev;
 +              if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
                        mpt3sas_base_free_smid(ioc, smid);
                        goto out;
                }
  
        raid_device = sas_target_priv_data->raid_device;
        if (raid_device && raid_device->direct_io_enabled)
 -              mpt3sas_setup_direct_io(ioc, scmd, raid_device, mpi_request,
 -                  smid);
 +              mpt3sas_setup_direct_io(ioc, scmd,
 +                      raid_device, mpi_request, smid);
  
        if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
                if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@@ -4930,7 -4273,6 +4930,7 @@@ _scsih_scsi_ioc_info(struct MPT3SAS_ADA
        char *desc_scsi_state = ioc->tmp_string;
        u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
        struct _sas_device *sas_device = NULL;
 +      struct _pcie_device *pcie_device = NULL;
        struct scsi_target *starget = scmd->device->sdev_target;
        struct MPT3SAS_TARGET *priv_target = starget->hostdata;
        char *device_str = NULL;
        if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
                pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
                    device_str, (unsigned long long)priv_target->sas_address);
 +      } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
 +              pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
 +              if (pcie_device) {
 +                      pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
 +                          ioc->name,
 +                          (unsigned long long)pcie_device->wwid,
 +                          pcie_device->port_num);
 +                      if (pcie_device->enclosure_handle != 0)
 +                              pr_info(MPT3SAS_FMT
 +                                  "\tenclosure logical id(0x%016llx), "
 +                                  "slot(%d)\n", ioc->name,
 +                                  (unsigned long long)
 +                                  pcie_device->enclosure_logical_id,
 +                                  pcie_device->slot);
 +                      if (pcie_device->connector_name[0])
 +                              pr_info(MPT3SAS_FMT
 +                                  "\tenclosure level(0x%04x),"
 +                                  "connector name( %s)\n",
 +                                  ioc->name, pcie_device->enclosure_level,
 +                                  pcie_device->connector_name);
 +                      pcie_device_put(pcie_device);
 +              }
        } else {
                sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
                if (sas_device) {
                                "\tsas_address(0x%016llx), phy(%d)\n",
                                ioc->name, (unsigned long long)
                            sas_device->sas_address, sas_device->phy);
 -                      if (sas_device->enclosure_handle != 0)
 -                              pr_warn(MPT3SAS_FMT
 -                                "\tenclosure_logical_id(0x%016llx),"
 -                                "slot(%d)\n", ioc->name,
 -                                (unsigned long long)
 -                                sas_device->enclosure_logical_id,
 -                                sas_device->slot);
 -                      if (sas_device->connector_name[0])
 -                              pr_warn(MPT3SAS_FMT
 -                                "\tenclosure level(0x%04x),"
 -                                " connector name( %s)\n", ioc->name,
 -                                sas_device->enclosure_level,
 -                                sas_device->connector_name);
 +
 +                      _scsih_display_enclosure_chassis_info(ioc, sas_device,
 +                          NULL, NULL);
  
                        sas_device_put(sas_device);
                }
                struct sense_info data;
                _scsih_normalize_sense(scmd->sense_buffer, &data);
                pr_warn(MPT3SAS_FMT
 -                      "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
 -                      ioc->name, data.skey,
 -                  data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
 +                "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
 +                ioc->name, data.skey,
 +                data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
        }
 -
        if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
                response_info = le32_to_cpu(mpi_reply->ResponseInfo);
                response_bytes = (u8 *)&response_info;
@@@ -5271,8 -4602,16 +5271,8 @@@ _scsih_smart_predicted_fault(struct MPT
           ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
                goto out_unlock;
  
 -      if (sas_device->enclosure_handle != 0)
 -              starget_printk(KERN_INFO, starget, "predicted fault, "
 -                      "enclosure logical id(0x%016llx), slot(%d)\n",
 -                      (unsigned long long)sas_device->enclosure_logical_id,
 -                      sas_device->slot);
 -      if (sas_device->connector_name[0] != '\0')
 -              starget_printk(KERN_WARNING, starget, "predicted fault, "
 -                      "enclosure level(0x%04x), connector name( %s)\n",
 -                      sas_device->enclosure_level,
 -                      sas_device->connector_name);
 +      _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
 +
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  
        if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@@ -5327,7 -4666,7 +5327,7 @@@ out_unlock
  static u8
  _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  {
 -      Mpi2SCSIIORequest_t *mpi_request;
 +      Mpi25SCSIIORequest_t *mpi_request;
        Mpi2SCSIIOReply_t *mpi_reply;
        struct scsi_cmnd *scmd;
        u16 ioc_status;
                    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
        if (!sas_device_priv_data->tlr_snoop_check) {
                sas_device_priv_data->tlr_snoop_check++;
 -              if (!ioc->is_warpdrive &&
 +              if ((!ioc->is_warpdrive &&
                    !scsih_is_raid(&scmd->device->sdev_gendev) &&
 -                  sas_is_tlr_enabled(scmd->device) &&
 +                  !scsih_is_nvme(&scmd->device->sdev_gendev))
 +                  && sas_is_tlr_enabled(scmd->device) &&
                    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
                        sas_disable_tlr(scmd->device);
                        sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
                } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
                        scmd->result = DID_RESET << 16;
                        break;
 +              } else if ((scmd->device->channel == RAID_CHANNEL) &&
 +                 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
 +                 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
 +                      scmd->result = DID_RESET << 16;
 +                      break;
                }
                scmd->result = DID_SOFT_ERROR << 16;
                break;
@@@ -5941,6 -5274,8 +5941,6 @@@ mpt3sas_expander_remove(struct MPT3SAS_
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
            sas_address);
 -      if (sas_expander)
 -              list_del(&sas_expander->list);
        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
        if (sas_expander)
                _scsih_expander_node_remove(ioc, sas_expander);
@@@ -6050,52 -5385,6 +6050,52 @@@ _scsih_check_access_status(struct MPT3S
        return rc;
  }
  
 +/**
 + * _scsih_get_enclosure_logicalid_chassis_slot - get device's
 + *                    EnclosureLogicalID and ChassisSlot information.
 + * @ioc: per adapter object
 + * @sas_device_pg0: SAS device page0
 + * @sas_device: per sas device object
 + *
 + * Returns nothing.
 + */
 +static void
 +_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
 +{
 +      Mpi2ConfigReply_t mpi_reply;
 +      Mpi2SasEnclosurePage0_t enclosure_pg0;
 +
 +      if (!sas_device_pg0 || !sas_device)
 +              return;
 +
 +      sas_device->enclosure_handle =
 +          le16_to_cpu(sas_device_pg0->EnclosureHandle);
 +      sas_device->is_chassis_slot_valid = 0;
 +
 +      if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
 +              return;
 +
 +      if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
 +          &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
 +          le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
 +              pr_err(MPT3SAS_FMT
 +                  "Enclosure Pg0 read failed for handle(0x%04x)\n",
 +                  ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
 +              return;
 +      }
 +
 +      sas_device->enclosure_logical_id =
 +          le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
 +
 +      if (le16_to_cpu(enclosure_pg0.Flags) &
 +          MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
 +              sas_device->is_chassis_slot_valid = 1;
 +              sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
 +      }
 +}
 +
 +
  /**
   * _scsih_check_device - checking device responsiveness
   * @ioc: per adapter object
@@@ -6120,6 -5409,7 +6120,6 @@@ _scsih_check_device(struct MPT3SAS_ADAP
        struct MPT3SAS_TARGET *sas_target_priv_data;
        u32 device_info;
  
 -
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
                return;
                        sas_device->enclosure_level = 0;
                        sas_device->connector_name[0] = '\0';
                }
 +
 +              _scsih_get_enclosure_logicalid_chassis_slot(ioc,
 +                  &sas_device_pg0, sas_device);
        }
  
        /* check if device is present */
@@@ -6220,7 -5507,6 +6220,7 @@@ _scsih_add_device(struct MPT3SAS_ADAPTE
        u32 ioc_status;
        u64 sas_address;
        u32 device_info;
 +      int encl_pg0_rc = -1;
  
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
                return -1;
        }
  
 +      if (sas_device_pg0.EnclosureHandle) {
 +              encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
 +                  &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
 +                  sas_device_pg0.EnclosureHandle);
 +              if (encl_pg0_rc)
 +                      pr_info(MPT3SAS_FMT
 +                          "Enclosure Pg0 read failed for handle(0x%04x)\n",
 +                          ioc->name, sas_device_pg0.EnclosureHandle);
 +      }
 +
        sas_device = kzalloc(sizeof(struct _sas_device),
            GFP_KERNEL);
        if (!sas_device) {
                sas_device->enclosure_level = 0;
                sas_device->connector_name[0] = '\0';
        }
 -      /* get enclosure_logical_id */
 -      if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
 -         ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
 -         sas_device->enclosure_handle)))
 +
 +      /* get enclosure_logical_id & chassis_slot */
 +      sas_device->is_chassis_slot_valid = 0;
 +      if (encl_pg0_rc == 0) {
                sas_device->enclosure_logical_id =
                    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
  
 +              if (le16_to_cpu(enclosure_pg0.Flags) &
 +                  MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
 +                      sas_device->is_chassis_slot_valid = 1;
 +                      sas_device->chassis_slot =
 +                          enclosure_pg0.ChassisSlot;
 +              }
 +      }
 +
        /* get device name */
        sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
  
@@@ -6357,15 -5625,23 +6357,15 @@@ _scsih_remove_device(struct MPT3SAS_ADA
                _scsih_turn_off_pfa_led(ioc, sas_device);
                sas_device->pfa_led_on = 0;
        }
 +
        dewtprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
                ioc->name, __func__,
            sas_device->handle, (unsigned long long)
            sas_device->sas_address));
 -      if (sas_device->enclosure_handle != 0)
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
 -                  ioc->name, __func__,
 -                  (unsigned long long)sas_device->enclosure_logical_id,
 -                  sas_device->slot));
 -      if (sas_device->connector_name[0] != '\0')
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
 -                ioc->name, __func__,
 -                sas_device->enclosure_level,
 -                sas_device->connector_name));
 +
 +      dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 +          NULL, NULL));
  
        if (sas_device->starget && sas_device->starget->hostdata) {
                sas_target_priv_data = sas_device->starget->hostdata;
                "removing handle(0x%04x), sas_addr(0x%016llx)\n",
                ioc->name, sas_device->handle,
            (unsigned long long) sas_device->sas_address);
 -      if (sas_device->enclosure_handle != 0)
 -              pr_info(MPT3SAS_FMT
 -                "removing : enclosure logical id(0x%016llx), slot(%d)\n",
 -                ioc->name,
 -                (unsigned long long)sas_device->enclosure_logical_id,
 -                sas_device->slot);
 -      if (sas_device->connector_name[0] != '\0')
 -              pr_info(MPT3SAS_FMT
 -                "removing enclosure level(0x%04x), connector name( %s)\n",
 -                ioc->name, sas_device->enclosure_level,
 -                sas_device->connector_name);
 +
 +      _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
  
        dewtprintk(ioc, pr_info(MPT3SAS_FMT
                "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
                ioc->name, __func__,
                sas_device->handle, (unsigned long long)
                sas_device->sas_address));
 -      if (sas_device->enclosure_handle != 0)
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
 -                  ioc->name, __func__,
 -                  (unsigned long long)sas_device->enclosure_logical_id,
 -                  sas_device->slot));
 -      if (sas_device->connector_name[0] != '\0')
 -              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 -                  "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
 -                  ioc->name, __func__, sas_device->enclosure_level,
 -                  sas_device->connector_name));
 +      dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 +          NULL, NULL));
  }
  
  /**
@@@ -6598,794 -5892,85 +6598,794 @@@ _scsih_sas_topology_change_event(struc
                        break;
                }
        }
 -
 -      /* handle expander removal */
 -      if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
 -          sas_expander)
 -              mpt3sas_expander_remove(ioc, sas_address);
 -
 -      return 0;
 +
 +      /* handle expander removal */
 +      if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
 +          sas_expander)
 +              mpt3sas_expander_remove(ioc, sas_address);
 +
 +      return 0;
 +}
 +
 +/**
 + * _scsih_sas_device_status_change_event_debug - debug for device event
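 + * @ioc: per adapter object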
 + * @event_data: event data payload
 + * Context: user.
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi2EventDataSasDeviceStatusChange_t *event_data)
 +{
 +      char *reason_str = NULL;
 +
 +      switch (event_data->ReasonCode) {
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
 +              reason_str = "smart data";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
 +              reason_str = "unsupported device discovered";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
 +              reason_str = "internal device reset";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
 +              reason_str = "internal task abort";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
 +              reason_str = "internal task abort set";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
 +              reason_str = "internal clear task set";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
 +              reason_str = "internal query task";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
 +              reason_str = "sata init failure";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
 +              reason_str = "internal device reset complete";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
 +              reason_str = "internal task abort complete";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
 +              reason_str = "internal async notification";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
 +              reason_str = "expander reduced functionality";
 +              break;
 +      case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
 +              reason_str = "expander reduced functionality complete";
 +              break;
 +      default:
 +              reason_str = "unknown reason";
 +              break;
 +      }
 +      pr_info(MPT3SAS_FMT "device status change: (%s)\n"
 +          "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
 +          ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
 +          (unsigned long long)le64_to_cpu(event_data->SASAddress),
 +          le16_to_cpu(event_data->TaskTag));
 +      if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
 +              pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
 +                  event_data->ASC, event_data->ASCQ);
 +      pr_info("\n");
 +}
 +
 +/**
 + * _scsih_sas_device_status_change_event - handle device status change
 + * @ioc: per adapter object
 + * @fw_event: The fw_event_work object
 + * Context: user.
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
 +      struct fw_event_work *fw_event)
 +{
 +      struct MPT3SAS_TARGET *target_priv_data;
 +      struct _sas_device *sas_device;
 +      u64 sas_address;
 +      unsigned long flags;
 +      Mpi2EventDataSasDeviceStatusChange_t *event_data =
 +              (Mpi2EventDataSasDeviceStatusChange_t *)
 +              fw_event->event_data;
 +
 +      if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
 +              _scsih_sas_device_status_change_event_debug(ioc,
 +                   event_data);
 +
 +      /* In MPI Revision K (0xC), the internal device reset complete was
 +       * implemented, so avoid setting tm_busy flag for older firmware.
 +       */
 +      if ((ioc->facts.HeaderVersion >> 8) < 0xC)
 +              return;
 +
 +      if (event_data->ReasonCode !=
 +          MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 +         event_data->ReasonCode !=
 +          MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
 +              return;
 +
 +      spin_lock_irqsave(&ioc->sas_device_lock, flags);
 +      sas_address = le64_to_cpu(event_data->SASAddress);
 +      sas_device = __mpt3sas_get_sdev_by_addr(ioc,
 +          sas_address);
 +
 +      if (!sas_device || !sas_device->starget)
 +              goto out;
 +
 +      target_priv_data = sas_device->starget->hostdata;
 +      if (!target_priv_data)
 +              goto out;
 +
 +      if (event_data->ReasonCode ==
 +          MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
 +              target_priv_data->tm_busy = 1;
 +      else
 +              target_priv_data->tm_busy = 0;
 +
 +out:
 +      if (sas_device)
 +              sas_device_put(sas_device);
 +
 +      spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 +}
 +
 +
 +/**
 + * _scsih_check_pcie_access_status - check device access status
 + * @ioc: per adapter object
 + * @wwid: wwid
 + * @handle: device handle
 + * @access_status: errors returned during discovery of the device
 + *
 + * Return 0 for success, else failure
 + */
 +static u8
 +_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
 +      u16 handle, u8 access_status)
 +{
 +      u8 rc = 1;
 +      char *desc = NULL;
 +
 +      switch (access_status) {
 +      case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
 +      case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
 +              rc = 0;
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
 +              desc = "PCIe device capability failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
 +              desc = "PCIe device blocked";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
 +              desc = "PCIe device mem space access failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
 +              desc = "PCIe device unsupported";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
 +              desc = "PCIe device MSIx Required";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
 +              desc = "PCIe device init fail max";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
 +              desc = "PCIe device status unknown";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
 +              desc = "nvme ready timeout";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
 +              desc = "nvme device configuration unsupported";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
 +              desc = "nvme identify failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
 +              desc = "nvme qconfig failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
 +              desc = "nvme qcreation failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
 +              desc = "nvme eventcfg failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
 +              desc = "nvme get feature stat failed";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
 +              desc = "nvme idle timeout";
 +              break;
 +      case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
 +              desc = "nvme failure status";
 +              break;
 +      default:
 +              pr_err(MPT3SAS_FMT
 +                  "NVMe discovery error(0x%02x): wwid(0x%016llx), "
 +                      "handle(0x%04x)\n", ioc->name, access_status,
 +                      (unsigned long long)wwid, handle);
 +              return rc;
 +      }
 +
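 +      /* rc == 0 means the device is accessible; nothing to report */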
 +      if (!rc)
 +              return rc;
 +
 +      pr_info(MPT3SAS_FMT
 +              "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
 +                      ioc->name, desc,
 +                      (unsigned long long)wwid, handle);
 +      return rc;
 +}
 +
 +/**
 + * _scsih_pcie_device_remove_from_sml -  removing pcie device
 + * from SML and free up associated memory
 + * @ioc: per adapter object
 + * @pcie_device: the pcie_device object
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
 +      struct _pcie_device *pcie_device)
 +{
 +      struct MPT3SAS_TARGET *sas_target_priv_data;
 +
 +      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +          "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
 +          pcie_device->handle, (unsigned long long)
 +          pcie_device->wwid));
 +      if (pcie_device->enclosure_handle != 0)
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                  "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
 +                  ioc->name, __func__,
 +                  (unsigned long long)pcie_device->enclosure_logical_id,
 +                  pcie_device->slot));
 +      if (pcie_device->connector_name[0] != '\0')
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                  "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
 +                  ioc->name, __func__,
 +                  pcie_device->enclosure_level,
 +                  pcie_device->connector_name));
 +
 +      if (pcie_device->starget && pcie_device->starget->hostdata) {
 +              sas_target_priv_data = pcie_device->starget->hostdata;
 +              sas_target_priv_data->deleted = 1;
 +              _scsih_ublock_io_device(ioc, pcie_device->wwid);
 +              sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
 +      }
 +
 +      pr_info(MPT3SAS_FMT
 +              "removing handle(0x%04x), wwid (0x%016llx)\n",
 +              ioc->name, pcie_device->handle,
 +              (unsigned long long) pcie_device->wwid);
 +      if (pcie_device->enclosure_handle != 0)
 +              pr_info(MPT3SAS_FMT
 +                  "removing : enclosure logical id(0x%016llx), slot(%d)\n",
 +                  ioc->name,
 +                  (unsigned long long)pcie_device->enclosure_logical_id,
 +                  pcie_device->slot);
 +      if (pcie_device->connector_name[0] != '\0')
 +              pr_info(MPT3SAS_FMT
 +                  "removing: enclosure level(0x%04x), connector name( %s)\n",
 +                  ioc->name, pcie_device->enclosure_level,
 +                  pcie_device->connector_name);
 +
 +      if (pcie_device->starget)
 +              scsi_remove_target(&pcie_device->starget->dev);
 +      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +          "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
 +          pcie_device->handle, (unsigned long long)
 +          pcie_device->wwid));
 +      if (pcie_device->enclosure_handle != 0)
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                      "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
 +                      ioc->name, __func__,
 +                      (unsigned long long)pcie_device->enclosure_logical_id,
 +                      pcie_device->slot));
 +      if (pcie_device->connector_name[0] != '\0')
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                  "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
 +                  ioc->name, __func__, pcie_device->enclosure_level,
 +                  pcie_device->connector_name));
 +
 +      kfree(pcie_device->serial_number);
 +}
 +
 +
 +/**
 + * _scsih_pcie_check_device - checking device responsiveness
 + * @ioc: per adapter object
 + * @handle: attached device handle
 + *
 + * Returns nothing.
 + */
 +static void
 +_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 +{
 +      Mpi2ConfigReply_t mpi_reply;
 +      Mpi26PCIeDevicePage0_t pcie_device_pg0;
 +      u32 ioc_status;
 +      struct _pcie_device *pcie_device;
 +      u64 wwid;
 +      unsigned long flags;
 +      struct scsi_target *starget;
 +      struct MPT3SAS_TARGET *sas_target_priv_data;
 +      u32 device_info;
 +
 +      if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
 +              &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
 +              return;
 +
 +      ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 +      if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 +              return;
 +
 +      /* check if this is end device */
 +      device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
 +      if (!(_scsih_is_nvme_device(device_info)))
 +              return;
 +
 +      wwid = le64_to_cpu(pcie_device_pg0.WWID);
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
 +
 +      if (!pcie_device) {
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              return;
 +      }
 +
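 +      /* update the cached handle if the firmware has reassigned it */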
 +      if (unlikely(pcie_device->handle != handle)) {
 +              starget = pcie_device->starget;
 +              sas_target_priv_data = starget->hostdata;
 +              starget_printk(KERN_INFO, starget,
 +                  "handle changed from(0x%04x) to (0x%04x)!!!\n",
 +                  pcie_device->handle, handle);
 +              sas_target_priv_data->handle = handle;
 +              pcie_device->handle = handle;
 +
 +              if (le32_to_cpu(pcie_device_pg0.Flags) &
 +                  MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
 +                      pcie_device->enclosure_level =
 +                          pcie_device_pg0.EnclosureLevel;
 +                      memcpy(&pcie_device->connector_name[0],
 +                          &pcie_device_pg0.ConnectorName[0], 4);
 +              } else {
 +                      pcie_device->enclosure_level = 0;
 +                      pcie_device->connector_name[0] = '\0';
 +              }
 +      }
 +
 +      /* check if device is present */
 +      if (!(le32_to_cpu(pcie_device_pg0.Flags) &
 +          MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
 +              pr_info(MPT3SAS_FMT
 +                  "device is not present handle(0x%04x), flags!!!\n",
 +                  ioc->name, handle);
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              pcie_device_put(pcie_device);
 +              return;
 +      }
 +
 +      /* check if there were any issues with discovery */
 +      if (_scsih_check_pcie_access_status(ioc, wwid, handle,
 +          pcie_device_pg0.AccessStatus)) {
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              pcie_device_put(pcie_device);
 +              return;
 +      }
 +
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +      pcie_device_put(pcie_device);
 +
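 +      /* device is present and accessible again; unblock its I/O */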
 +      _scsih_ublock_io_device(ioc, wwid);
 +
 +      return;
 +}
 +
 +/**
 + * _scsih_pcie_add_device -  creating pcie device object
 + * @ioc: per adapter object
 + * @handle: pcie device handle
 + *
 + * Creating end device object, stored in ioc->pcie_device_list.
 + *
 + * Return 1 means queue the event later, 0 means complete the event
 + */
 +static int
 +_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 +{
 +      Mpi26PCIeDevicePage0_t pcie_device_pg0;
 +      Mpi26PCIeDevicePage2_t pcie_device_pg2;
 +      Mpi2ConfigReply_t mpi_reply;
 +      Mpi2SasEnclosurePage0_t enclosure_pg0;
 +      struct _pcie_device *pcie_device;
 +      u32 pcie_device_type;
 +      u32 ioc_status;
 +      u64 wwid;
 +
 +      if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
 +          &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
 +              pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 +                  ioc->name, __FILE__, __LINE__, __func__);
 +              return 0;
 +      }
 +      ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 +          MPI2_IOCSTATUS_MASK;
 +      if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 +              pr_err(MPT3SAS_FMT
 +                  "failure at %s:%d/%s()!\n",
 +                  ioc->name, __FILE__, __LINE__, __func__);
 +              return 0;
 +      }
 +
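 +      /* mark the device add as pending until it is exposed to the OS */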
 +      set_bit(handle, ioc->pend_os_device_add);
 +      wwid = le64_to_cpu(pcie_device_pg0.WWID);
 +
 +      /* check if device is present */
 +      if (!(le32_to_cpu(pcie_device_pg0.Flags) &
 +              MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
 +              pr_err(MPT3SAS_FMT
 +                  "device is not present handle(0x%04x)!!!\n",
 +                  ioc->name, handle);
 +              return 0;
 +      }
 +
 +      /* check if there were any issues with discovery */
 +      if (_scsih_check_pcie_access_status(ioc, wwid, handle,
 +          pcie_device_pg0.AccessStatus))
 +              return 0;
 +
 +      if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
 +              return 0;
 +
 +      pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
 +      if (pcie_device) {
 +              clear_bit(handle, ioc->pend_os_device_add);
 +              pcie_device_put(pcie_device);
 +              return 0;
 +      }
 +
 +      pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
 +      if (!pcie_device) {
 +              pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 +                      ioc->name, __FILE__, __LINE__, __func__);
 +              return 0;
 +      }
 +
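 +      /* populate the new pcie_device from PCIe Device Page 0 */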
 +      kref_init(&pcie_device->refcount);
 +      pcie_device->id = ioc->pcie_target_id++;
 +      pcie_device->channel = PCIE_CHANNEL;
 +      pcie_device->handle = handle;
 +      pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
 +      pcie_device->wwid = wwid;
 +      pcie_device->port_num = pcie_device_pg0.PortNum;
 +      pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
 +          MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
 +      pcie_device_type = pcie_device->device_info &
 +          MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
 +
 +      pcie_device->enclosure_handle =
 +          le16_to_cpu(pcie_device_pg0.EnclosureHandle);
 +      if (pcie_device->enclosure_handle != 0)
 +              pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
 +
 +      if (le16_to_cpu(pcie_device_pg0.Flags) &
 +          MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
 +              pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
 +              memcpy(&pcie_device->connector_name[0],
 +                  &pcie_device_pg0.ConnectorName[0], 4);
 +      } else {
 +              pcie_device->enclosure_level = 0;
 +              pcie_device->connector_name[0] = '\0';
 +      }
 +
 +      /* get enclosure_logical_id */
 +      if (pcie_device->enclosure_handle &&
 +              !(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
 +                      &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
 +                      pcie_device->enclosure_handle)))
 +              pcie_device->enclosure_logical_id =
 +                      le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
 +
 +      /* TODO -- Add device name once FW supports it */
 +      if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
 +              &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
 +              pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 +                              ioc->name, __FILE__, __LINE__, __func__);
 +              kfree(pcie_device);
 +              return 0;
 +      }
 +
 +      ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 +      if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 +              pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 +                      ioc->name, __FILE__, __LINE__, __func__);
 +              kfree(pcie_device);
 +              return 0;
 +      }
 +      pcie_device->nvme_mdts =
 +              le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
 +
 +      if (ioc->wait_for_discovery_to_complete)
 +              _scsih_pcie_device_init_add(ioc, pcie_device);
 +      else
 +              _scsih_pcie_device_add(ioc, pcie_device);
 +
 +      pcie_device_put(pcie_device);
 +      return 0;
 +}
 +
 +/**
 + * _scsih_pcie_topology_change_event_debug - debug for topology
 + * event
 + * @ioc: per adapter object
 + * @event_data: event data payload
 + * Context: user.
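 + *
 + * Return nothing.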
 + */
 +static void
 +_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi26EventDataPCIeTopologyChangeList_t *event_data)
 +{
 +      int i;
 +      u16 handle;
 +      u16 reason_code;
 +      u8 port_number;
 +      char *status_str = NULL;
 +      u8 link_rate, prev_link_rate;
 +
 +      switch (event_data->SwitchStatus) {
 +      case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
 +              status_str = "add";
 +              break;
 +      case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
 +              status_str = "remove";
 +              break;
 +      case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
 +      case 0:
 +              status_str =  "responding";
 +              break;
 +      case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
 +              status_str = "remove delay";
 +              break;
 +      default:
 +              status_str = "unknown status";
 +              break;
 +      }
 +      pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
 +              ioc->name, status_str);
 +      pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
 +              "start_port(%02d), count(%d)\n",
 +              le16_to_cpu(event_data->SwitchDevHandle),
 +              le16_to_cpu(event_data->EnclosureHandle),
 +              event_data->StartPortNum, event_data->NumEntries);
 +      for (i = 0; i < event_data->NumEntries; i++) {
 +              handle =
 +                      le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
 +              if (!handle)
 +                      continue;
 +              port_number = event_data->StartPortNum + i;
 +              reason_code = event_data->PortEntry[i].PortStatus;
 +              switch (reason_code) {
 +              case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
 +                      status_str = "target add";
 +                      break;
 +              case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
 +                      status_str = "target remove";
 +                      break;
 +              case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
 +                      status_str = "delay target remove";
 +                      break;
 +              case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
 +                      status_str = "link rate change";
 +                      break;
 +              case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
 +                      status_str = "target responding";
 +                      break;
 +              default:
 +                      status_str = "unknown";
 +                      break;
 +              }
 +              link_rate = event_data->PortEntry[i].CurrentPortInfo &
 +                      MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
 +              prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
 +                      MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
 +              pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
 +                      " link rate: new(0x%02x), old(0x%02x)\n", port_number,
 +                      handle, status_str, link_rate, prev_link_rate);
 +      }
 +}
 +
 +/**
 + * _scsih_pcie_topology_change_event - handle PCIe topology
 + *  changes
 + * @ioc: per adapter object
 + * @fw_event: The fw_event_work object
 + * Context: user.
 + *
 + */
 +static int
 +_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 +      struct fw_event_work *fw_event)
 +{
 +      int i;
 +      u16 handle;
 +      u16 reason_code;
 +      u8 link_rate, prev_link_rate;
 +      unsigned long flags;
 +      int rc;
 +      int requeue_event = 0;
 +      Mpi26EventDataPCIeTopologyChangeList_t *event_data =
 +              (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
 +      struct _pcie_device *pcie_device;
 +
 +      if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
 +              _scsih_pcie_topology_change_event_debug(ioc, event_data);
 +
 +      if (ioc->shost_recovery || ioc->remove_host ||
 +              ioc->pci_error_recovery)
 +              return 0;
 +
 +      if (fw_event->ignore) {
 +              dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
 +                      ioc->name));
 +              return 0;
 +      }
 +
 +      /* handle siblings events */
 +      for (i = 0; i < event_data->NumEntries; i++) {
 +              if (fw_event->ignore) {
 +                      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                              "ignoring switch event\n", ioc->name));
 +                      return 0;
 +              }
 +              if (ioc->remove_host || ioc->pci_error_recovery)
 +                      return 0;
 +              reason_code = event_data->PortEntry[i].PortStatus;
 +              handle =
 +                      le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
 +              if (!handle)
 +                      continue;
 +
 +              link_rate = event_data->PortEntry[i].CurrentPortInfo
 +                      & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
 +              prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
 +                      & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
 +
 +              switch (reason_code) {
 +              case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
 +                      if (ioc->shost_recovery)
 +                              break;
 +                      if (link_rate == prev_link_rate)
 +                              break;
 +                      if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
 +                              break;
 +
 +                      _scsih_pcie_check_device(ioc, handle);
 +
 +                      /* The code after this point handles the case where a
 +                       * device has been added but keeps returning BUSY for
 +                       * some time.  Then, before the Device Missing Delay
 +                       * expires and the device becomes READY, the device is
 +                       * removed and added back.
 +                       */
 +                      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +                      pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
 +                      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +                      if (pcie_device) {
 +                              pcie_device_put(pcie_device);
 +                              break;
 +                      }
 +
 +                      if (!test_bit(handle, ioc->pend_os_device_add))
 +                              break;
 +
 +                      dewtprintk(ioc, pr_info(MPT3SAS_FMT
 +                              "handle(0x%04x) device not found: convert "
 +                              "event to a device add\n", ioc->name, handle));
 +                      event_data->PortEntry[i].PortStatus &= 0xF0;
 +                      event_data->PortEntry[i].PortStatus |=
 +                              MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
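 +                      /* fall through */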
 +              case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
 +                      if (ioc->shost_recovery)
 +                              break;
 +                      if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
 +                              break;
 +
 +                      rc = _scsih_pcie_add_device(ioc, handle);
 +                      if (!rc) {
 +                              /* mark entry vacant */
 +                              /* TODO: This needs to be reviewed and fixed;
 +                               * we don't have a way to mark an individual
 +                               * event entry as void (vacant).
 +                               */
 +                              event_data->PortEntry[i].PortStatus |=
 +                                      MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
 +                      }
 +                      break;
 +              case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
 +                      _scsih_pcie_device_remove_by_handle(ioc, handle);
 +                      break;
 +              }
 +      }
 +      return requeue_event;
  }
  
  /**
 - * _scsih_sas_device_status_change_event_debug - debug for device event
 + * _scsih_pcie_device_status_change_event_debug - debug for
 + * device event
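 + * @ioc: per adapter object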
   * @event_data: event data payload
   * Context: user.
   *
   * Return nothing.
   */
  static void
 -_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 -      Mpi2EventDataSasDeviceStatusChange_t *event_data)
 +_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
  {
        char *reason_str = NULL;
  
        switch (event_data->ReasonCode) {
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
                reason_str = "smart data";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
                reason_str = "unsupported device discovered";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
                reason_str = "internal device reset";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
                reason_str = "internal task abort";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
                reason_str = "internal task abort set";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
                reason_str = "internal clear task set";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
                reason_str = "internal query task";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
 -              reason_str = "sata init failure";
 +      case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
 +              reason_str = "device init failure";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
                reason_str = "internal device reset complete";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
                reason_str = "internal task abort complete";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
 +      case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
                reason_str = "internal async notification";
                break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
 -              reason_str = "expander reduced functionality";
 -              break;
 -      case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
 -              reason_str = "expander reduced functionality complete";
 -              break;
        default:
                reason_str = "unknown reason";
                break;
        }
 -      pr_info(MPT3SAS_FMT "device status change: (%s)\n"
 -          "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
 -          ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
 -          (unsigned long long)le64_to_cpu(event_data->SASAddress),
 -          le16_to_cpu(event_data->TaskTag));
 -      if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
 +
 +      pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
 +              "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
 +              ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
 +              (unsigned long long)le64_to_cpu(event_data->WWID),
 +              le16_to_cpu(event_data->TaskTag));
 +      if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
                pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
 -                  event_data->ASC, event_data->ASCQ);
 +                      event_data->ASC, event_data->ASCQ);
        pr_info("\n");
  }
  
  /**
 - * _scsih_sas_device_status_change_event - handle device status change
 + * _scsih_pcie_device_status_change_event - handle device status
 + * change
   * @ioc: per adapter object
   * @fw_event: The fw_event_work object
   * Context: user.
   * Return nothing.
   */
  static void
 -_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
 +_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
        struct fw_event_work *fw_event)
  {
        struct MPT3SAS_TARGET *target_priv_data;
 -      struct _sas_device *sas_device;
 -      u64 sas_address;
 +      struct _pcie_device *pcie_device;
 +      u64 wwid;
        unsigned long flags;
 -      Mpi2EventDataSasDeviceStatusChange_t *event_data =
 -              (Mpi2EventDataSasDeviceStatusChange_t *)
 -              fw_event->event_data;
 -
 +      Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
 +              (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
        if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
 -              _scsih_sas_device_status_change_event_debug(ioc,
 -                   event_data);
 -
 -      /* In MPI Revision K (0xC), the internal device reset complete was
 -       * implemented, so avoid setting tm_busy flag for older firmware.
 -       */
 -      if ((ioc->facts.HeaderVersion >> 8) < 0xC)
 -              return;
 +              _scsih_pcie_device_status_change_event_debug(ioc,
 +                      event_data);
  
        if (event_data->ReasonCode !=
 -          MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 -         event_data->ReasonCode !=
 -          MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
 +              MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 +              event_data->ReasonCode !=
 +              MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
                return;
  
 -      spin_lock_irqsave(&ioc->sas_device_lock, flags);
 -      sas_address = le64_to_cpu(event_data->SASAddress);
 -      sas_device = __mpt3sas_get_sdev_by_addr(ioc,
 -          sas_address);
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      wwid = le64_to_cpu(event_data->WWID);
 +      pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
  
 -      if (!sas_device || !sas_device->starget)
 +      if (!pcie_device || !pcie_device->starget)
                goto out;
  
 -      target_priv_data = sas_device->starget->hostdata;
 +      target_priv_data = pcie_device->starget->hostdata;
        if (!target_priv_data)
                goto out;
  
        if (event_data->ReasonCode ==
 -          MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
 +              MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
                target_priv_data->tm_busy = 1;
        else
                target_priv_data->tm_busy = 0;
 -
  out:
 -      if (sas_device)
 -              sas_device_put(sas_device);
 -
 -      spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 +      if (pcie_device)
 +              pcie_device_put(pcie_device);
  
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  }
  
  /**
@@@ -7685,35 -6281,6 +7685,35 @@@ _scsih_sas_discovery_event(struct MPT3S
        }
  }
  
 +/**
 + * _scsih_pcie_enumeration_event - handle enumeration events
 + * @ioc: per adapter object
 + * @fw_event: The fw_event_work object
 + * Context: user.
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
 +      struct fw_event_work *fw_event)
 +{
 +      Mpi26EventDataPCIeEnumeration_t *event_data =
 +              (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
 +
 +      if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
 +              return;
 +
 +      pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
 +              ioc->name,
 +              (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
 +                      "started" : "completed",
 +              event_data->Flags);
 +      if (event_data->EnumerationStatus)
 +              pr_cont("enumeration_status(0x%08x)",
 +                      le32_to_cpu(event_data->EnumerationStatus));
 +      pr_cont("\n");
 +}
 +
  /**
   * _scsih_ir_fastpath - turn on fastpath for IR physdisk
   * @ioc: per adapter object
@@@ -8518,7 -7085,7 +8518,7 @@@ Mpi2SasDevicePage0_t *sas_device_pg0
  {
        struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
        struct scsi_target *starget;
 -      struct _sas_device *sas_device;
 +      struct _sas_device *sas_device = NULL;
        unsigned long flags;
  
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
                                sas_device->connector_name[0] = '\0';
                        }
  
 +                      _scsih_get_enclosure_logicalid_chassis_slot(ioc,
 +                          sas_device_pg0, sas_device);
 +
                        if (sas_device->handle == sas_device_pg0->DevHandle)
                                goto out;
                        pr_info("\thandle changed from(0x%04x)!!!\n",
@@@ -8625,130 -7189,6 +8625,130 @@@ _scsih_search_responding_sas_devices(st
            ioc->name);
  }
  
 +/**
 + * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 + * @ioc: per adapter object
 + * @pcie_device_pg0: PCIe Device page 0
 + *
 + * After host reset, find out whether devices are still responding.
 + * Used in _scsih_remove_unresponding_devices.
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi26PCIeDevicePage0_t *pcie_device_pg0)
 +{
 +      struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
 +      struct scsi_target *starget;
 +      struct _pcie_device *pcie_device;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
 +              if ((pcie_device->wwid == pcie_device_pg0->WWID) &&
 +                  (pcie_device->slot == pcie_device_pg0->Slot)) {
 +                      pcie_device->responding = 1;
 +                      starget = pcie_device->starget;
 +                      if (starget && starget->hostdata) {
 +                              sas_target_priv_data = starget->hostdata;
 +                              sas_target_priv_data->tm_busy = 0;
 +                              sas_target_priv_data->deleted = 0;
 +                      } else
 +                              sas_target_priv_data = NULL;
 +                      if (starget) {
 +                              starget_printk(KERN_INFO, starget,
 +                                  "handle(0x%04x), wwid(0x%016llx) ",
 +                                  pcie_device->handle,
 +                                  (unsigned long long)pcie_device->wwid);
 +                              if (pcie_device->enclosure_handle != 0)
 +                                      starget_printk(KERN_INFO, starget,
 +                                          "enclosure logical id(0x%016llx), "
 +                                          "slot(%d)\n",
 +                                          (unsigned long long)
 +                                          pcie_device->enclosure_logical_id,
 +                                          pcie_device->slot);
 +                      }
 +
 +                      if (((le32_to_cpu(pcie_device_pg0->Flags)) &
 +                          MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
 +                          (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
 +                              pcie_device->enclosure_level =
 +                                  pcie_device_pg0->EnclosureLevel;
 +                              memcpy(&pcie_device->connector_name[0],
 +                                  &pcie_device_pg0->ConnectorName[0], 4);
 +                      } else {
 +                              pcie_device->enclosure_level = 0;
 +                              pcie_device->connector_name[0] = '\0';
 +                      }
 +
 +                      if (pcie_device->handle == pcie_device_pg0->DevHandle)
 +                              goto out;
 +                      pr_info("\thandle changed from(0x%04x)!!!\n",
 +                          pcie_device->handle);
 +                      pcie_device->handle = pcie_device_pg0->DevHandle;
 +                      if (sas_target_priv_data)
 +                              sas_target_priv_data->handle =
 +                                  pcie_device_pg0->DevHandle;
 +                      goto out;
 +              }
 +      }
 +
 + out:
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +}
 +
 +/**
 + * _scsih_search_responding_pcie_devices - search for responding PCIe devices
 + * @ioc: per adapter object
 + *
 + * After host reset, find out whether devices are still responding.
 + * If not, remove them.
 + *
 + * Return nothing.
 + */
 +static void
 +_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
 +{
 +      Mpi26PCIeDevicePage0_t pcie_device_pg0;
 +      Mpi2ConfigReply_t mpi_reply;
 +      u16 ioc_status;
 +      u16 handle;
 +      u32 device_info;
 +
 +      pr_info(MPT3SAS_FMT "search for PCIe end-devices: start\n", ioc->name);
 +
 +      if (list_empty(&ioc->pcie_device_list))
 +              goto out;
 +
 +      handle = 0xFFFF;
 +      while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
 +              &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
 +              handle))) {
 +              ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 +                  MPI2_IOCSTATUS_MASK;
 +              if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 +                      pr_info(MPT3SAS_FMT "\tbreak from %s: "
 +                          "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
 +                          __func__, ioc_status,
 +                          le32_to_cpu(mpi_reply.IOCLogInfo));
 +                      break;
 +              }
 +              handle = le16_to_cpu(pcie_device_pg0.DevHandle);
 +              device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
 +              if (!(_scsih_is_nvme_device(device_info)))
 +                      continue;
 +              pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID);
 +              pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
 +              pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
 +              pcie_device_pg0.DevHandle = handle;
 +              _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
 +      }
 +out:
 +      pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
 +          ioc->name);
 +}
 +
  /**
   * _scsih_mark_responding_raid_device - mark a raid_device as responding
   * @ioc: per adapter object
@@@ -8882,7 -7322,8 +8882,7 @@@ _scsih_search_responding_raid_devices(s
  /**
   * _scsih_mark_responding_expander - mark a expander as responding
   * @ioc: per adapter object
 - * @sas_address: sas address
 - * @handle:
 + * @expander_pg0: SAS Expander Config Page0
   *
   * After host reset, find out whether devices are still responding.
   * Used in _scsih_remove_unresponsive_expanders.
   * Return nothing.
   */
  static void
 -_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
 -      u16 handle)
 +_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
 +      Mpi2ExpanderPage0_t *expander_pg0)
  {
 -      struct _sas_node *sas_expander;
 +      struct _sas_node *sas_expander = NULL;
        unsigned long flags;
 -      int i;
 +      int i, encl_pg0_rc = -1;
 +      Mpi2ConfigReply_t mpi_reply;
 +      Mpi2SasEnclosurePage0_t enclosure_pg0;
 +      u16 handle = le16_to_cpu(expander_pg0->DevHandle);
 +      u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
 +
 +      if (le16_to_cpu(expander_pg0->EnclosureHandle)) {
 +              encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
 +                  &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
 +                  le16_to_cpu(expander_pg0->EnclosureHandle));
 +              if (encl_pg0_rc)
 +                      pr_info(MPT3SAS_FMT
 +                          "Enclosure Pg0 read failed for handle(0x%04x)\n",
 +                          ioc->name,
 +                          le16_to_cpu(expander_pg0->EnclosureHandle));
 +      }
  
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
                if (sas_expander->sas_address != sas_address)
                        continue;
                sas_expander->responding = 1;
 +
 +              if (!encl_pg0_rc)
 +                      sas_expander->enclosure_logical_id =
 +                          le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
 +
 +              sas_expander->enclosure_handle =
 +                  le16_to_cpu(expander_pg0->EnclosureHandle);
 +
                if (sas_expander->handle == handle)
                        goto out;
                pr_info("\texpander(0x%016llx): handle changed" \
@@@ -8977,7 -7395,7 +8977,7 @@@ _scsih_search_responding_expanders(stru
                pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
                        handle,
                    (unsigned long long)sas_address);
 -              _scsih_mark_responding_expander(ioc, sas_address, handle);
 +              _scsih_mark_responding_expander(ioc, &expander_pg0);
        }
  
   out:
  }
  
  /**
 - * _scsih_remove_unresponding_sas_devices - removing unresponding devices
 + * _scsih_remove_unresponding_devices - removing unresponding devices
   * @ioc: per adapter object
   *
   * Return nothing.
   */
  static void
 -_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
 +_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
  {
        struct _sas_device *sas_device, *sas_device_next;
        struct _sas_node *sas_expander, *sas_expander_next;
        struct _raid_device *raid_device, *raid_device_next;
 +      struct _pcie_device *pcie_device, *pcie_device_next;
        struct list_head tmp_list;
        unsigned long flags;
        LIST_HEAD(head);
                sas_device_put(sas_device);
        }
  
 +      pr_info(MPT3SAS_FMT
 +              " Removing unresponding devices: pcie end-devices\n"
 +              , ioc->name);
 +      INIT_LIST_HEAD(&head);
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      list_for_each_entry_safe(pcie_device, pcie_device_next,
 +          &ioc->pcie_device_list, list) {
 +              if (!pcie_device->responding)
 +                      list_move_tail(&pcie_device->list, &head);
 +              else
 +                      pcie_device->responding = 0;
 +      }
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
 +              _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
 +              list_del_init(&pcie_device->list);
 +              pcie_device_put(pcie_device);
 +      }
 +
        /* removing unresponding volumes */
        if (ioc->ir_firmware) {
                pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
        list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
            list) {
 -              list_del(&sas_expander->list);
                _scsih_expander_node_remove(ioc, sas_expander);
        }
  
@@@ -9122,7 -7520,6 +9122,7 @@@ _scsih_scan_for_devices_after_reset(str
  {
        Mpi2ExpanderPage0_t expander_pg0;
        Mpi2SasDevicePage0_t sas_device_pg0;
 +      Mpi26PCIeDevicePage0_t pcie_device_pg0;
        Mpi2RaidVolPage1_t volume_pg1;
        Mpi2RaidVolPage0_t volume_pg0;
        Mpi2RaidPhysDiskPage0_t pd_pg0;
        u16 handle, parent_handle;
        u64 sas_address;
        struct _sas_device *sas_device;
 +      struct _pcie_device *pcie_device;
        struct _sas_node *expander_device;
        static struct _raid_device *raid_device;
        u8 retry_count;
        }
        pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
            ioc->name);
 +      pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
 +          ioc->name);
 +
 +      /* pcie devices */
 +      handle = 0xFFFF;
 +      while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
 +              &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
 +              handle))) {
 +              ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
 +                              & MPI2_IOCSTATUS_MASK;
 +              if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
 +                      pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
 +                              " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
 +                              ioc->name, ioc_status,
 +                              le32_to_cpu(mpi_reply.IOCLogInfo));
 +                      break;
 +              }
 +              handle = le16_to_cpu(pcie_device_pg0.DevHandle);
 +              if (!(_scsih_is_nvme_device(
 +                      le32_to_cpu(pcie_device_pg0.DeviceInfo))))
 +                      continue;
 +              pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
 +                              le64_to_cpu(pcie_device_pg0.WWID));
 +              if (pcie_device) {
 +                      pcie_device_put(pcie_device);
 +                      continue;
 +              }
 +              retry_count = 0;
 +              parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
 +              _scsih_pcie_add_device(ioc, handle);
  
 +              pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
 +                      "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
 +                      handle,
 +                      (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
 +      }
 +      pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
 +              ioc->name);
        pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
  }
  /**
@@@ -9446,7 -7805,6 +9446,7 @@@ mpt3sas_scsih_reset_handler(struct MPT3
                    !ioc->sas_hba.num_phys)) {
                        _scsih_prep_device_scan(ioc);
                        _scsih_search_responding_sas_devices(ioc);
 +                      _scsih_search_responding_pcie_devices(ioc);
                        _scsih_search_responding_raid_devices(ioc);
                        _scsih_search_responding_expanders(ioc);
                        _scsih_error_recovery_delete_devices(ioc);
@@@ -9491,7 -7849,7 +9491,7 @@@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTE
                                goto out;
                        ssleep(1);
                }
 -              _scsih_remove_unresponding_sas_devices(ioc);
 +              _scsih_remove_unresponding_devices(ioc);
                _scsih_scan_for_devices_after_reset(ioc);
                break;
        case MPT3SAS_PORT_ENABLE_COMPLETE:
        case MPI2_EVENT_IR_OPERATION_STATUS:
                _scsih_sas_ir_operation_status_event(ioc, fw_event);
                break;
 +      case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
 +              _scsih_pcie_device_status_change_event(ioc, fw_event);
 +              break;
 +      case MPI2_EVENT_PCIE_ENUMERATION:
 +              _scsih_pcie_enumeration_event(ioc, fw_event);
 +              break;
 +      case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
 +              _scsih_pcie_topology_change_event(ioc, fw_event);
 +              return;
 +      break;
        }
  out:
        fw_event_work_put(fw_event);
@@@ -9634,11 -7982,6 +9634,11 @@@ mpt3sas_scsih_event_callback(struct MPT
                    (Mpi2EventDataSasTopologyChangeList_t *)
                    mpi_reply->EventData);
                break;
 +      case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
 +      _scsih_check_pcie_topo_remove_events(ioc,
 +                  (Mpi26EventDataPCIeTopologyChangeList_t *)
 +                  mpi_reply->EventData);
 +              break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                _scsih_check_ir_config_unhide_events(ioc,
                    (Mpi2EventDataIrConfigChangeList_t *)
        case MPI2_EVENT_SAS_DISCOVERY:
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
        case MPI2_EVENT_IR_PHYSICAL_DISK:
 +      case MPI2_EVENT_PCIE_ENUMERATION:
 +      case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                break;
  
        case MPI2_EVENT_TEMP_THRESHOLD:
                    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
                switch (ActiveCableEventData->ReasonCode) {
                case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
 -                      pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable"
 -                                " requires %d mW of power\n", ioc->name,
 -                           ActiveCableEventData->ReceptacleID,
 +                      pr_notice(MPT3SAS_FMT
 +                          "Currently an active cable with ReceptacleID %d\n",
 +                          ioc->name, ActiveCableEventData->ReceptacleID);
 +                      pr_notice("cannot be powered and devices connected\n");
 +                      pr_notice("to this active cable will not be seen\n");
 +                      pr_notice("This active cable requires %d mW of power\n",
                             ActiveCableEventData->ActiveCablePowerRequirement);
 -                      pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
 -                                " to this active cable will not be seen\n",
 -                           ioc->name, ActiveCableEventData->ReceptacleID);
                        break;
  
                case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
 -                      pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable",
 -                              ioc->name, ActiveCableEventData->ReceptacleID);
 -                      pr_notice(" is not running at an optimal speed(12 Gb/s)\n");
 +                      pr_notice(MPT3SAS_FMT
 +                          "Currently a cable with ReceptacleID %d\n",
 +                          ioc->name, ActiveCableEventData->ReceptacleID);
 +                      pr_notice(
 +                          "is not running at optimal speed(12 Gb/s rate)\n");
                        break;
                }
  
   * _scsih_expander_node_remove - removing expander device from list.
   * @ioc: per adapter object
   * @sas_expander: the sas_device object
 - * Context: Calling function should acquire ioc->sas_node_lock.
   *
   * Removing object and freeing associated memory from the
   * ioc->sas_expander_list.
@@@ -9772,7 -8112,6 +9772,7 @@@ _scsih_expander_node_remove(struct MPT3
        struct _sas_node *sas_expander)
  {
        struct _sas_port *mpt3sas_port, *next;
 +      unsigned long flags;
  
        /* remove sibling ports attached to this expander */
        list_for_each_entry_safe(mpt3sas_port, next,
            sas_expander->handle, (unsigned long long)
            sas_expander->sas_address);
  
 +      spin_lock_irqsave(&ioc->sas_node_lock, flags);
 +      list_del(&sas_expander->list);
 +      spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 +
        kfree(sas_expander->phy);
        kfree(sas_expander);
  }
@@@ -9896,7 -8231,6 +9896,7 @@@ static void scsih_remove(struct pci_de
        struct _sas_port *mpt3sas_port, *next_port;
        struct _raid_device *raid_device, *next;
        struct MPT3SAS_TARGET *sas_target_priv_data;
 +      struct _pcie_device *pcie_device, *pcienext;
        struct workqueue_struct *wq;
        unsigned long flags;
  
                    (unsigned long long) raid_device->wwid);
                _scsih_raid_device_remove(ioc, raid_device);
        }
 +      list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
 +              list) {
 +              _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
 +              list_del_init(&pcie_device->list);
 +              pcie_device_put(pcie_device);
 +      }
  
        /* free ports attached to the sas_host */
        list_for_each_entry_safe(mpt3sas_port, next_port,
@@@ -10002,52 -8330,42 +10002,52 @@@ scsih_shutdown(struct pci_dev *pdev
  static void
  _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
  {
 -      u8 is_raid;
 +      u32 channel;
        void *device;
        struct _sas_device *sas_device;
        struct _raid_device *raid_device;
 +      struct _pcie_device *pcie_device;
        u16 handle;
        u64 sas_address_parent;
        u64 sas_address;
        unsigned long flags;
        int rc;
 +      int tid;
  
         /* no Bios, return immediately */
        if (!ioc->bios_pg3.BiosVersion)
                return;
  
        device = NULL;
 -      is_raid = 0;
        if (ioc->req_boot_device.device) {
                device =  ioc->req_boot_device.device;
 -              is_raid = ioc->req_boot_device.is_raid;
 +              channel = ioc->req_boot_device.channel;
        } else if (ioc->req_alt_boot_device.device) {
                device =  ioc->req_alt_boot_device.device;
 -              is_raid = ioc->req_alt_boot_device.is_raid;
 +              channel = ioc->req_alt_boot_device.channel;
        } else if (ioc->current_boot_device.device) {
                device =  ioc->current_boot_device.device;
 -              is_raid = ioc->current_boot_device.is_raid;
 +              channel = ioc->current_boot_device.channel;
        }
  
        if (!device)
                return;
  
 -      if (is_raid) {
 +      if (channel == RAID_CHANNEL) {
                raid_device = device;
                rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
                    raid_device->id, 0);
                if (rc)
                        _scsih_raid_device_remove(ioc, raid_device);
 +      } else if (channel == PCIE_CHANNEL) {
 +              spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +              pcie_device = device;
 +              tid = pcie_device->id;
 +              list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
 +              spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +              rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
 +              if (rc)
 +                      _scsih_pcie_device_remove(ioc, pcie_device);
        } else {
                spin_lock_irqsave(&ioc->sas_device_lock, flags);
                sas_device = device;
@@@ -10179,101 -8497,6 +10179,101 @@@ _scsih_probe_sas(struct MPT3SAS_ADAPTE
        }
  }
  
 +/**
 + * get_next_pcie_device - Get the next pcie device
 + * @ioc: per adapter object
 + *
 + * Get the next pcie device from pcie_device_init_list list.
 + *
 + * Returns pcie device structure if pcie_device_init_list list is not empty
 + * otherwise returns NULL
 + */
 +static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
 +{
 +      struct _pcie_device *pcie_device = NULL;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +      if (!list_empty(&ioc->pcie_device_init_list)) {
 +              pcie_device = list_first_entry(&ioc->pcie_device_init_list,
 +                              struct _pcie_device, list);
 +              pcie_device_get(pcie_device);
 +      }
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +
 +      return pcie_device;
 +}
 +
 +/**
 + * pcie_device_make_active - Add pcie device to pcie_device_list list
 + * @ioc: per adapter object
 + * @pcie_device: pcie device object
 + *
 + * Add the pcie device which has been registered with the SCSI Transport Layer
 + * to the pcie_device_list list.
 + */
 +static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
 +              struct _pcie_device *pcie_device)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 +
 +      if (!list_empty(&pcie_device->list)) {
 +              list_del_init(&pcie_device->list);
 +              pcie_device_put(pcie_device);
 +      }
 +      pcie_device_get(pcie_device);
 +      list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
 +
 +      spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 +}
 +
 +/**
 + * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 + * @ioc: per adapter object
 + *
 + * Called during initial loading of the driver.
 + */
 +static void
 +_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
 +{
 +      struct _pcie_device *pcie_device;
 +      int rc;
 +
 +      /* PCIe Device List */
 +      while ((pcie_device = get_next_pcie_device(ioc))) {
 +              if (pcie_device->starget) {
 +                      pcie_device_put(pcie_device);
 +                      continue;
 +              }
 +              rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
 +                      pcie_device->id, 0);
 +              if (rc) {
 +                      _scsih_pcie_device_remove(ioc, pcie_device);
 +                      pcie_device_put(pcie_device);
 +                      continue;
 +              } else if (!pcie_device->starget) {
 +                      /*
 +                       * When async scanning is enabled, it's not possible to
 +                       * remove devices while scanning is turned on due to an
 +                       * oops in scsi_sysfs_add_sdev()->add_device()->
 +                       * sysfs_addrm_start()
 +                       */
 +                      if (!ioc->is_driver_loading) {
 +                      /* TODO-- Need to find out whether this condition will
 +                       * occur or not
 +                       */
 +                              _scsih_pcie_device_remove(ioc, pcie_device);
 +                              pcie_device_put(pcie_device);
 +                              continue;
 +                      }
 +              }
 +              pcie_device_make_active(ioc, pcie_device);
 +              pcie_device_put(pcie_device);
 +      }
 +}
 +
  /**
   * _scsih_probe_devices - probing for devices
   * @ioc: per adapter object
@@@ -10302,10 -8525,8 +10302,10 @@@ _scsih_probe_devices(struct MPT3SAS_ADA
                        _scsih_probe_sas(ioc);
                        _scsih_probe_raid(ioc);
                }
 -      } else
 +      } else {
                _scsih_probe_sas(ioc);
 +              _scsih_probe_pcie(ioc);
 +      }
  }
  
  /**
@@@ -10519,7 -8740,6 +10519,7 @@@ _scsih_determine_hba_mpi_version(struc
        case MPI26_MFGPAGE_DEVID_SAS3516:
        case MPI26_MFGPAGE_DEVID_SAS3516_1:
        case MPI26_MFGPAGE_DEVID_SAS3416:
 +      case MPI26_MFGPAGE_DEVID_SAS3616:
                return MPI26_VERSION;
        }
        return 0;
@@@ -10597,7 -8817,6 +10597,7 @@@ _scsih_probe(struct pci_dev *pdev, cons
                case MPI26_MFGPAGE_DEVID_SAS3516:
                case MPI26_MFGPAGE_DEVID_SAS3516_1:
                case MPI26_MFGPAGE_DEVID_SAS3416:
 +              case MPI26_MFGPAGE_DEVID_SAS3616:
                        ioc->is_gen35_ioc = 1;
                        break;
                default:
        spin_lock_init(&ioc->sas_node_lock);
        spin_lock_init(&ioc->fw_event_lock);
        spin_lock_init(&ioc->raid_device_lock);
 +      spin_lock_init(&ioc->pcie_device_lock);
        spin_lock_init(&ioc->diag_trigger_lock);
  
        INIT_LIST_HEAD(&ioc->sas_device_list);
        INIT_LIST_HEAD(&ioc->sas_device_init_list);
        INIT_LIST_HEAD(&ioc->sas_expander_list);
 +      INIT_LIST_HEAD(&ioc->pcie_device_list);
 +      INIT_LIST_HEAD(&ioc->pcie_device_init_list);
        INIT_LIST_HEAD(&ioc->fw_event_list);
        INIT_LIST_HEAD(&ioc->raid_device_list);
        INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
@@@ -11057,9 -9273,6 +11057,9 @@@ static const struct pci_device_id mpt3s
                PCI_ANY_ID, PCI_ANY_ID },
        { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
                PCI_ANY_ID, PCI_ANY_ID },
 +      /* Mercator ~ 3616*/
 +      { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
 +              PCI_ANY_ID, PCI_ANY_ID },
        {0}     /* Terminating entry */
  };
  MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
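
The hunks above append MPI26_MFGPAGE_DEVID_SAS3616 to mpt3sas_pci_table, and MODULE_DEVICE_TABLE(pci, ...) exports that table as module aliases so udev/modprobe can autoload the driver when a matching controller appears. A minimal, self-contained sketch of the same ID-table pattern follows; the driver name "demo_pci" and the device ID 0x00d1 are hypothetical and not taken from this driver:

#include <linux/module.h>
#include <linux/pci.h>

/* sentinel-terminated ID table; PCI_DEVICE() wildcards subvendor/subdevice */
static const struct pci_device_id demo_pci_table[] = {
	{ PCI_DEVICE(0x1000 /* LSI */, 0x00d1 /* hypothetical device ID */) },
	{ 0 }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_table);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* a real probe would call pci_enable_device(), map BARs, etc. */
	dev_info(&pdev->dev, "matched device 0x%04x\n", id->device);
	return 0;
}

static struct pci_driver demo_pci_driver = {
	.name		= "demo_pci",
	.id_table	= demo_pci_table,
	.probe		= demo_probe,
};
module_pci_driver(demo_pci_driver);
MODULE_LICENSE("GPL");
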
index ddb46fa2d07f45bd6fa3463c9f3fe36ac4773595,5532c440bf61be00193101ecc2e15e5e63e17400..b4ba2b1dab767cac45a96dd6f0e2064ad138447c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0
  /*
   * Based on the same principle as kgdboe using the NETPOLL api, this
   * driver uses a console polling api to implement a gdb serial interface
@@@ -7,6 -6,10 +7,6 @@@
   * Maintainer: Jason Wessel <jason.wessel@windriver.com>
   *
   * 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
 - *
 - * This file is licensed under the terms of the GNU General Public
 - * License version 2. This program is licensed "as is" without any
 - * warranty of any kind, whether express or implied.
   */
  #include <linux/kernel.h>
  #include <linux/ctype.h>
@@@ -242,7 -245,8 +242,8 @@@ static void kgdboc_put_char(u8 chr
                                        kgdb_tty_line, chr);
  }
  
- static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+ static int param_set_kgdboc_var(const char *kmessage,
+                               const struct kernel_param *kp)
  {
        int len = strlen(kmessage);
  
diff --combined fs/fuse/inode.c
index 94a745acaef842eed799e45731bd12dd2ac0f7f7,7d67fc150edcf7d9614f512b851dd6481e45fa86..a79e320349cd79b8032feb20cd17c4fd5f25fd64
@@@ -31,7 -31,7 +31,7 @@@ static struct kmem_cache *fuse_inode_ca
  struct list_head fuse_conn_list;
  DEFINE_MUTEX(fuse_mutex);
  
- static int set_global_limit(const char *val, struct kernel_param *kp);
+ static int set_global_limit(const char *val, const struct kernel_param *kp);
  
  unsigned max_user_bgreq;
  module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
@@@ -823,7 -823,7 +823,7 @@@ static void sanitize_global_limit(unsig
                *limit = (1 << 16) - 1;
  }
  
- static int set_global_limit(const char *val, struct kernel_param *kp)
+ static int set_global_limit(const char *val, const struct kernel_param *kp)
  {
        int rv;
  
@@@ -1059,7 -1059,7 +1059,7 @@@ static int fuse_fill_super(struct super
        if (sb->s_flags & MS_MANDLOCK)
                goto err;
  
 -      sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
 +      sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
  
        if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
index 1d7140fef154c8bd68b4c3a55f0f7dfa02136d8a,20386252fe3ec94c88c7e400ed14126f40cdc631..ba36506db4fb71f4b4297f81c1bcb893d30b8145
@@@ -1,4 -1,3 +1,4 @@@
 +/* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_MODULE_PARAMS_H
  #define _LINUX_MODULE_PARAMS_H
  /* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
@@@ -228,19 -227,11 +228,11 @@@ struct kparam_arra
            VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
  
  /* Obsolete - use module_param_cb() */
- #define module_param_call(name, set, get, arg, perm)                  \
-       static const struct kernel_param_ops __param_ops_##name =               \
-               { .flags = 0, (void *)set, (void *)get };               \
+ #define module_param_call(name, _set, _get, arg, perm)                        \
+       static const struct kernel_param_ops __param_ops_##name =       \
+               { .flags = 0, .set = _set, .get = _get };               \
        __module_param_call(MODULE_PARAM_PREFIX,                        \
-                           name, &__param_ops_##name, arg,             \
-                           (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
- /* We don't get oldget: it's often a new-style param_get_uint, etc. */
- static inline int
- __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
- {
-       return 0;
- }
+                           name, &__param_ops_##name, arg, perm, -1, 0)
  
  #ifdef CONFIG_SYSFS
  extern void kernel_param_lock(struct module *mod);
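
The rewritten macro above builds the kernel_param_ops directly from the supplied set/get handlers instead of casting them, so those handlers must now take a const struct kernel_param *, as the converted callers in this merge (kgdboc above; fuse, sunrpc, nf_conntrack and apparmor below) show. A minimal sketch of a parameter written against the const prototypes, with hypothetical module and symbol names:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int my_threshold = 10;

/* set handler: same signature the new module_param_call() expects */
static int my_set_threshold(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret = kstrtouint(val, 0, &n);

	if (ret)
		return ret;
	if (n > 100)		/* reject out-of-range values */
		return -EINVAL;
	*(unsigned int *)kp->arg = n;
	return 0;
}

/* reuse the stock getter, which also takes const struct kernel_param * */
module_param_call(my_threshold, my_set_threshold, param_get_uint,
		  &my_threshold, 0644);
MODULE_PARM_DESC(my_threshold, "example threshold (0-100)");
MODULE_LICENSE("GPL");
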
index 792c3f6d30ce8fa336c3c78aebcc4ba991b79b58,fd5241ce1fc930c77daa1c082ed2011f68a501bf..f5223bf2c420a0d21ba167131b07d6ce9daa6b70
@@@ -1,4 -1,3 +1,4 @@@
 +/* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Connection state tracking for netfilter.  This is separated from,
   * but required by, the (future) NAT layer; it can also be used by an iptables
@@@ -285,7 -284,7 +285,7 @@@ static inline bool nf_ct_should_gc(cons
  
  struct kernel_param;
  
- int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
  int nf_conntrack_hash_resize(unsigned int hashsize);
  
  extern struct hlist_nulls_head *nf_conntrack_hash;
diff --combined kernel/module.c
index 32c2cdaccd93966cd0fda561511fe90674e45022,07ef44767245a7e459ee484c92ee5de755b187f6..222aba4aa960a947488afa52e21c11a3d47d457a
@@@ -278,16 -278,6 +278,16 @@@ static bool sig_enforce = IS_ENABLED(CO
  module_param(sig_enforce, bool_enable_only, 0644);
  #endif /* !CONFIG_MODULE_SIG_FORCE */
  
 +/*
 + * Export the sig_enforce kernel cmdline parameter so that other subsystems can
 + * rely on it instead of testing the CONFIG_MODULE_SIG_FORCE config option directly.
 + */
 +bool is_module_sig_enforced(void)
 +{
 +      return sig_enforce;
 +}
 +EXPORT_SYMBOL(is_module_sig_enforced);
 +
  /* Block module loading/unloading? */
  int modules_disabled = 0;
  core_param(nomodule, modules_disabled, bint, 0);
@@@ -847,10 -837,8 +847,8 @@@ static int add_module_usage(struct modu
  
        pr_debug("Allocating new usage for %s.\n", a->name);
        use = kmalloc(sizeof(*use), GFP_ATOMIC);
-       if (!use) {
-               pr_warn("%s: out of memory loading\n", a->name);
+       if (!use)
                return -ENOMEM;
-       }
  
        use->source = a;
        use->target = b;
@@@ -1526,7 -1514,7 +1524,7 @@@ static void add_sect_attrs(struct modul
                sattr->mattr.show = module_sect_show;
                sattr->mattr.store = NULL;
                sattr->mattr.attr.name = sattr->name;
 -              sattr->mattr.attr.mode = S_IRUGO;
 +              sattr->mattr.attr.mode = S_IRUSR;
                *(gattr++) = &(sattr++)->mattr.attr;
        }
        *gattr = NULL;
@@@ -4157,7 -4145,6 +4155,7 @@@ static int m_show(struct seq_file *m, v
  {
        struct module *mod = list_entry(p, struct module, list);
        char buf[MODULE_FLAGS_BUF_SIZE];
 +      unsigned long value;
  
        /* We always ignore unformed modules. */
        if (mod->state == MODULE_STATE_UNFORMED)
                   mod->state == MODULE_STATE_COMING ? "Loading" :
                   "Live");
        /* Used by oprofile and other similar tools. */
 -      seq_printf(m, " 0x%pK", mod->core_layout.base);
 +      value = m->private ? 0 : (unsigned long)mod->core_layout.base;
 +      seq_printf(m, " 0x" KALLSYM_FMT, value);
  
        /* Taints info */
        if (mod->taints)
@@@ -4196,23 -4182,9 +4194,23 @@@ static const struct seq_operations modu
        .show   = m_show
  };
  
 +/*
 + * This also sets the "private" pointer to non-NULL if the
 + * kernel pointers should be hidden (so you can just test
 + * "m->private" to see if you should keep the values private).
 + *
 + * We use the same logic as for /proc/kallsyms.
 + */
  static int modules_open(struct inode *inode, struct file *file)
  {
 -      return seq_open(file, &modules_op);
 +      int err = seq_open(file, &modules_op);
 +
 +      if (!err) {
 +              struct seq_file *m = file->private_data;
 +              m->private = kallsyms_show_value() ? NULL : (void *)8ul;
 +      }
 +
 +      return err;
  }
  
  static const struct file_operations proc_modules_operations = {
index 5749fcaa2770ed1d13276b3eba764f239e1c8e04,5e50c54e1318e554284eb09bb12269c8d5c6a5f8..85f643c1e227c5a70c3b3038baad981e88dca880
@@@ -1083,7 -1083,7 +1083,7 @@@ static void gc_worker(struct work_struc
        next_run = gc_work->next_gc_run;
        gc_work->last_bucket = i;
        gc_work->early_drop = false;
 -      queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
 +      queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
  }
  
  static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
@@@ -1419,7 -1419,7 +1419,7 @@@ repeat
        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
  
 -      ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, timeouts);
 +      ret = l4proto->packet(ct, skb, dataoff, ctinfo, timeouts);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
@@@ -1563,14 -1563,9 +1563,14 @@@ int nf_ct_port_nlattr_to_tuple(struct n
  }
  EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
  
 -int nf_ct_port_nlattr_tuple_size(void)
 +unsigned int nf_ct_port_nlattr_tuple_size(void)
  {
 -      return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
 +      static unsigned int size __read_mostly;
 +
 +      if (!size)
 +              size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
 +
 +      return size;
  }
  EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
  #endif
@@@ -1945,7 -1940,7 +1945,7 @@@ int nf_conntrack_hash_resize(unsigned i
        return 0;
  }
  
- int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
+ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
  {
        unsigned int hashsize;
        int rc;
@@@ -2089,7 -2084,7 +2089,7 @@@ int nf_conntrack_init_start(void
                goto err_proto;
  
        conntrack_gc_work_init(&conntrack_gc_work);
 -      queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
 +      queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
  
        return 0;
  
diff --combined net/sunrpc/svc.c
index 33f4ae68426d65837b15a9e91b625bfc36ca05c4,e5e4e18699c8b02ffe5600e451b59eb6d78588a1..387cc4add6f6551fe9a9b5cc96566aab00b843bc
@@@ -50,7 -50,7 +50,7 @@@ EXPORT_SYMBOL_GPL(svc_pool_map)
  static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
  
  static int
- param_set_pool_mode(const char *val, struct kernel_param *kp)
+ param_set_pool_mode(const char *val, const struct kernel_param *kp)
  {
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
@@@ -80,7 -80,7 +80,7 @@@ out
  }
  
  static int
- param_get_pool_mode(char *buf, struct kernel_param *kp)
+ param_get_pool_mode(char *buf, const struct kernel_param *kp)
  {
        int *ip = (int *)kp->arg;
  
@@@ -455,7 -455,7 +455,7 @@@ __svc_create(struct svc_program *prog, 
        serv->sv_xdrsize   = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
 -      init_timer(&serv->sv_temptimer);
 +      timer_setup(&serv->sv_temptimer, NULL, 0);
        spin_lock_init(&serv->sv_lock);
  
        __svc_init_bc(serv);
diff --combined security/apparmor/lsm.c
index 1346ee5be04f13e4bb64658755605852b8789ab5,f46a12b339bc408325f4cce510355d9cd640ff72..17893fde44873ade4c28a62b7a3729e73d7c161d
@@@ -33,6 -33,7 +33,6 @@@
  #include "include/context.h"
  #include "include/file.h"
  #include "include/ipc.h"
 -#include "include/net.h"
  #include "include/path.h"
  #include "include/label.h"
  #include "include/policy.h"
@@@ -736,6 -737,368 +736,6 @@@ static int apparmor_task_kill(struct ta
        return error;
  }
  
 -/**
 - * apparmor_sk_alloc_security - allocate and attach the sk_security field
 - */
 -static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
 -{
 -      struct aa_sk_ctx *ctx;
 -
 -      ctx = kzalloc(sizeof(*ctx), flags);
 -      if (!ctx)
 -              return -ENOMEM;
 -
 -      SK_CTX(sk) = ctx;
 -
 -      return 0;
 -}
 -
 -/**
 - * apparmor_sk_free_security - free the sk_security field
 - */
 -static void apparmor_sk_free_security(struct sock *sk)
 -{
 -      struct aa_sk_ctx *ctx = SK_CTX(sk);
 -
 -      SK_CTX(sk) = NULL;
 -      aa_put_label(ctx->label);
 -      aa_put_label(ctx->peer);
 -      path_put(&ctx->path);
 -      kfree(ctx);
 -}
 -
 -/**
 - * apparmor_clone_security - clone the sk_security field
 - */
 -static void apparmor_sk_clone_security(const struct sock *sk,
 -                                     struct sock *newsk)
 -{
 -      struct aa_sk_ctx *ctx = SK_CTX(sk);
 -      struct aa_sk_ctx *new = SK_CTX(newsk);
 -
 -      new->label = aa_get_label(ctx->label);
 -      new->peer = aa_get_label(ctx->peer);
 -      new->path = ctx->path;
 -      path_get(&new->path);
 -}
 -
 -static int aa_sock_create_perm(struct aa_label *label, int family, int type,
 -                             int protocol)
 -{
 -      AA_BUG(!label);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type,
 -                        protocol);
 -}
 -
 -
 -/**
 - * apparmor_socket_create - check perms before creating a new socket
 - */
 -static int apparmor_socket_create(int family, int type, int protocol, int kern)
 -{
 -      struct aa_label *label;
 -      int error = 0;
 -
 -      label = begin_current_label_crit_section();
 -      if (!(kern || unconfined(label)))
 -              error = aa_sock_create_perm(label, family, type, protocol);
 -      end_current_label_crit_section(label);
 -
 -      return error;
 -}
 -
 -/**
 - * apparmor_socket_post_create - setup the per-socket security struct
 - *
 - * Note:
 - * -   kernel sockets currently labeled unconfined but we may want to
 - *     move to a special kernel label
 - * -   socket may not have sk here if created with sock_create_lite or
 - *     sock_alloc. These should be accept cases which will be handled in
 - *     sock_graft.
 - */
 -static int apparmor_socket_post_create(struct socket *sock, int family,
 -                                     int type, int protocol, int kern)
 -{
 -      struct aa_label *label;
 -
 -      if (kern) {
 -              struct aa_ns *ns = aa_get_current_ns();
 -
 -              label = aa_get_label(ns_unconfined(ns));
 -              aa_put_ns(ns);
 -      } else
 -              label = aa_get_current_label();
 -
 -      if (sock->sk) {
 -              struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
 -
 -              aa_put_label(ctx->label);
 -              ctx->label = aa_get_label(label);
 -      }
 -      aa_put_label(label);
 -
 -      return 0;
 -}
 -
 -/**
 - * apparmor_socket_bind - check perms before bind addr to socket
 - */
 -static int apparmor_socket_bind(struct socket *sock,
 -                              struct sockaddr *address, int addrlen)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(!address);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
 -}
 -
 -/**
 - * apparmor_socket_connect - check perms before connecting @sock to @address
 - */
 -static int apparmor_socket_connect(struct socket *sock,
 -                                 struct sockaddr *address, int addrlen)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(!address);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
 -}
 -
 -/**
 - * apparmor_socket_list - check perms before allowing listen
 - */
 -static int apparmor_socket_listen(struct socket *sock, int backlog)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
 -}
 -
 -/**
 - * apparmor_socket_accept - check perms before accepting a new connection.
 - *
 - * Note: while @newsock is created and has some information, the accept
 - *       has not been done.
 - */
 -static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(!newsock);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
 -}
 -
 -static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
 -                          struct msghdr *msg, int size)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(!msg);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(op, request, sock->sk);
 -}
 -
 -/**
 - * apparmor_socket_sendmsg - check perms before sending msg to another socket
 - */
 -static int apparmor_socket_sendmsg(struct socket *sock,
 -                                 struct msghdr *msg, int size)
 -{
 -      return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
 -}
 -
 -/**
 - * apparmor_socket_recvmsg - check perms before receiving a message
 - */
 -static int apparmor_socket_recvmsg(struct socket *sock,
 -                                 struct msghdr *msg, int size, int flags)
 -{
 -      return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
 -}
 -
 -/* revaliation, get/set attr, shutdown */
 -static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(op, request, sock->sk);
 -}
 -
 -/**
 - * apparmor_socket_getsockname - check perms before getting the local address
 - */
 -static int apparmor_socket_getsockname(struct socket *sock)
 -{
 -      return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
 -}
 -
 -/**
 - * apparmor_socket_getpeername - check perms before getting remote address
 - */
 -static int apparmor_socket_getpeername(struct socket *sock)
 -{
 -      return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
 -}
 -
 -/* revaliation, get/set attr, opt */
 -static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
 -                          int level, int optname)
 -{
 -      AA_BUG(!sock);
 -      AA_BUG(!sock->sk);
 -      AA_BUG(in_interrupt());
 -
 -      return aa_sk_perm(op, request, sock->sk);
 -}
 -
 -/**
 - * apparmor_getsockopt - check perms before getting socket options
 - */
 -static int apparmor_socket_getsockopt(struct socket *sock, int level,
 -                                    int optname)
 -{
 -      return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
 -                              level, optname);
 -}
 -
 -/**
 - * apparmor_setsockopt - check perms before setting socket options
 - */
 -static int apparmor_socket_setsockopt(struct socket *sock, int level,
 -                                    int optname)
 -{
 -      return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
 -                              level, optname);
 -}
 -
 -/**
 - * apparmor_socket_shutdown - check perms before shutting down @sock conn
 - */
 -static int apparmor_socket_shutdown(struct socket *sock, int how)
 -{
 -      return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
 -}
 -
 -/**
 - * apparmor_socket_sock_recv_skb - check perms before associating skb to sk
 - *
 - * Note: can not sleep may be called with locks held
 - *
 - * dont want protocol specific in __skb_recv_datagram()
 - * to deny an incoming connection  socket_sock_rcv_skb()
 - */
 -static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 -{
 -      return 0;
 -}
 -
 -
 -static struct aa_label *sk_peer_label(struct sock *sk)
 -{
 -      struct aa_sk_ctx *ctx = SK_CTX(sk);
 -
 -      if (ctx->peer)
 -              return ctx->peer;
 -
 -      return ERR_PTR(-ENOPROTOOPT);
 -}
 -
 -/**
 - * apparmor_socket_getpeersec_stream - get security context of peer
 - *
 - * Note: for tcp only valid if using ipsec or cipso on lan
 - */
 -static int apparmor_socket_getpeersec_stream(struct socket *sock,
 -                                           char __user *optval,
 -                                           int __user *optlen,
 -                                           unsigned int len)
 -{
 -      char *name;
 -      int slen, error = 0;
 -      struct aa_label *label;
 -      struct aa_label *peer;
 -
 -      label = begin_current_label_crit_section();
 -      peer = sk_peer_label(sock->sk);
 -      if (IS_ERR(peer)) {
 -              error = PTR_ERR(peer);
 -              goto done;
 -      }
 -      slen = aa_label_asxprint(&name, labels_ns(label), peer,
 -                               FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
 -                               FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
 -      /* don't include terminating \0 in slen, it breaks some apps */
 -      if (slen < 0) {
 -              error = -ENOMEM;
 -      } else {
 -              if (slen > len) {
 -                      error = -ERANGE;
 -              } else if (copy_to_user(optval, name, slen)) {
 -                      error = -EFAULT;
 -                      goto out;
 -              }
 -              if (put_user(slen, optlen))
 -                      error = -EFAULT;
 -out:
 -              kfree(name);
 -
 -      }
 -
 -done:
 -      end_current_label_crit_section(label);
 -
 -      return error;
 -}
 -
 -/**
 - * apparmor_socket_getpeersec_dgram - get security label of packet
 - * @sock: the peer socket
 - * @skb: packet data
 - * @secid: pointer to where to put the secid of the packet
 - *
 - * Sets the netlabel socket state on sk from parent
 - */
 -static int apparmor_socket_getpeersec_dgram(struct socket *sock,
 -                                          struct sk_buff *skb, u32 *secid)
 -
 -{
 -      /* TODO: requires secid support */
 -      return -ENOPROTOOPT;
 -}
 -
 -/**
 - * apparmor_sock_graft - Initialize newly created socket
 - * @sk: child sock
 - * @parent: parent socket
 - *
 - * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
 - *       just set sk security information off of current creating process label
 - *       Labeling of sk for accept case - probably should be sock based
 - *       instead of task, because of the case where an implicitly labeled
 - *       socket is shared by different tasks.
 - */
 -static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
 -{
 -      struct aa_sk_ctx *ctx = SK_CTX(sk);
 -
 -      if (!ctx->label)
 -              ctx->label = aa_get_current_label();
 -}
 -
  static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
        LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
        LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
        LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
  
 -      LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
 -      LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
 -      LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
 -
 -      LSM_HOOK_INIT(socket_create, apparmor_socket_create),
 -      LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
 -      LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
 -      LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
 -      LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
 -      LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
 -      LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
 -      LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
 -      LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
 -      LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
 -      LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
 -      LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
 -      LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
 -      LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
 -      LSM_HOOK_INIT(socket_getpeersec_stream,
 -                    apparmor_socket_getpeersec_stream),
 -      LSM_HOOK_INIT(socket_getpeersec_dgram,
 -                    apparmor_socket_getpeersec_dgram),
 -      LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
 -
        LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
        LSM_HOOK_INIT(cred_free, apparmor_cred_free),
        LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
@@@ -813,11 -1200,11 +813,11 @@@ static const struct kernel_param_ops pa
        .get = param_get_aalockpolicy
  };
  
- static int param_set_audit(const char *val, struct kernel_param *kp);
- static int param_get_audit(char *buffer, struct kernel_param *kp);
+ static int param_set_audit(const char *val, const struct kernel_param *kp);
+ static int param_get_audit(char *buffer, const struct kernel_param *kp);
  
- static int param_set_mode(const char *val, struct kernel_param *kp);
- static int param_get_mode(char *buffer, struct kernel_param *kp);
+ static int param_set_mode(const char *val, const struct kernel_param *kp);
+ static int param_get_mode(char *buffer, const struct kernel_param *kp);
  
  /* Flag values, also controllable via /sys/module/apparmor/parameters
   * We define special types as we want to do additional mediation.
@@@ -951,7 -1338,7 +951,7 @@@ static int param_get_aauint(char *buffe
        return param_get_uint(buffer, kp);
  }
  
- static int param_get_audit(char *buffer, struct kernel_param *kp)
+ static int param_get_audit(char *buffer, const struct kernel_param *kp)
  {
        if (!apparmor_enabled)
                return -EINVAL;
        return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
  }
  
- static int param_set_audit(const char *val, struct kernel_param *kp)
+ static int param_set_audit(const char *val, const struct kernel_param *kp)
  {
        int i;
  
        return -EINVAL;
  }
  
- static int param_get_mode(char *buffer, struct kernel_param *kp)
+ static int param_get_mode(char *buffer, const struct kernel_param *kp)
  {
        if (!apparmor_enabled)
                return -EINVAL;
        return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
  }
  
- static int param_set_mode(const char *val, struct kernel_param *kp)
+ static int param_set_mode(const char *val, const struct kernel_param *kp)
  {
        int i;