diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ba27215b8034460c8578fdd14295623ebc047e1b..9da319130da5837ce47ac9b6bda93c5534252bfa 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,6 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *     Santosh Yaraganavi <santosh.sy@samsung.com>
  * circumstances will the contributor of this Program be liable for
  * any damages of any kind arising from your use or distribution of
  * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
  */
 
 #include <linux/async.h>
+#include <linux/devfreq.h>
 
 #include "ufshcd.h"
 #include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS    (UTP_TRANSFER_REQ_COMPL |\
                                 UTP_TASK_REQ_COMPL |\
-                                UIC_POWER_MODE |\
                                 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT        500
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT 100 /* msecs */
 
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO        0x02
 
+#define ufshcd_toggle_vreg(_dev, _vreg, _on)                           \
+       ({                                                              \
+               int _ret;                                               \
+               if (_on)                                                \
+                       _ret = ufshcd_enable_vreg(_dev, _vreg);         \
+               else                                                    \
+                       _ret = ufshcd_disable_vreg(_dev, _vreg);        \
+               _ret;                                                   \
+       })
+
+static u32 ufs_query_desc_max_size[] = {
+       QUERY_DESC_DEVICE_MAX_SIZE,
+       QUERY_DESC_CONFIGURAION_MAX_SIZE,
+       QUERY_DESC_UNIT_MAX_SIZE,
+       QUERY_DESC_RFU_MAX_SIZE,
+       QUERY_DESC_INTERCONNECT_MAX_SIZE,
+       QUERY_DESC_STRING_MAX_SIZE,
+       QUERY_DESC_RFU_MAX_SIZE,
+       QUERY_DESC_GEOMETRY_MAZ_SIZE,
+       QUERY_DESC_POWER_MAX_SIZE,
+       QUERY_DESC_RFU_MAX_SIZE,
+};
+
 enum {
        UFSHCD_MAX_CHANNEL      = 0,
        UFSHCD_MAX_ID           = 1,
-       UFSHCD_MAX_LUNS         = 8,
        UFSHCD_CMD_PER_LUN      = 32,
        UFSHCD_CAN_QUEUE        = 32,
 };
@@ -106,12 +138,79 @@ enum {
 #define ufshcd_clear_eh_in_progress(h) \
        (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 
+#define ufshcd_set_ufs_dev_active(h) \
+       ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+       ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+       ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+       ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+       ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+       ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+       {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+       {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+       {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+       {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+       {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+       {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+       return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+       return ufs_pm_lvl_states[lvl].link_state;
+}
+
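The ufs_pm_lvl_states[] table above encodes each UFS PM level as a (device power mode, UIC link state) pair, and the two getters are plain table lookups. A standalone sketch of the same mapping, with the enums stubbed out locally (the real definitions live in ufshcd.h, so the numeric values here are only assumptions):

#include <stdio.h>

/* Local stand-ins for the ufshcd.h enums; numeric values are assumptions. */
enum ufs_dev_pwr_mode { UFS_ACTIVE_PWR_MODE, UFS_SLEEP_PWR_MODE, UFS_POWERDOWN_PWR_MODE };
enum uic_link_state { UIC_LINK_OFF_STATE, UIC_LINK_ACTIVE_STATE, UIC_LINK_HIBERN8_STATE };

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};

/* Same rows as the table in the patch: higher PM levels save more power. */
static const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

int main(void)
{
	int lvl = 3;	/* e.g. a platform might pick level 3 for runtime suspend */

	printf("pm level %d -> dev_state %d, link_state %d\n", lvl,
	       ufs_pm_lvl_states[lvl].dev_state,
	       ufs_pm_lvl_states[lvl].link_state);
	return 0;
}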
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
-                                       struct scsi_device *sdev);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+                                bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+               struct ufs_pa_layer_attr *desired_pwr_mode);
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+       int ret = 0;
+
+       if (!hba->is_irq_enabled) {
+               ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+                               hba);
+               if (ret)
+                       dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+                               __func__, ret);
+               hba->is_irq_enabled = true;
+       }
+
+       return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+       if (hba->is_irq_enabled) {
+               free_irq(hba->irq, hba);
+               hba->is_irq_enabled = false;
+       }
+}
 
 /*
  * ufshcd_wait_for_register - wait for register value to change
@@ -175,13 +274,14 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 /**
  * ufshcd_is_device_present - Check if any device connected to
  *                           the host controller
- * @reg_hcs - host controller status register value
+ * @hba: pointer to adapter instance
  *
  * Returns 1 if device present, 0 if no device detected
  */
-static inline int ufshcd_is_device_present(u32 reg_hcs)
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 {
-       return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
+       return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+                                               DEVICE_PRESENT) ? 1 : 0;
 }
 
 /**
@@ -413,6 +513,265 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+       int ret;
+       unsigned long flags;
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                       clk_gating.ungate_work);
+
+       cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_gating.state == CLKS_ON) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               goto unblock_reqs;
+       }
+
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_setup_clocks(hba, true);
+
+       /* Exit from hibern8 */
+       if (ufshcd_can_hibern8_during_gating(hba)) {
+               /* Prevent gating in this path */
+               hba->clk_gating.is_suspended = true;
+               if (ufshcd_is_link_hibern8(hba)) {
+                       ret = ufshcd_uic_hibern8_exit(hba);
+                       if (ret)
+                               dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+                                       __func__, ret);
+                       else
+                               ufshcd_set_link_active(hba);
+               }
+               hba->clk_gating.is_suspended = false;
+       }
+unblock_reqs:
+       if (ufshcd_is_clkscaling_enabled(hba))
+               devfreq_resume_device(hba->devfreq);
+       scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+       int rc = 0;
+       unsigned long flags;
+
+       if (!ufshcd_is_clkgating_allowed(hba))
+               goto out;
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_gating.active_reqs++;
+
+start:
+       switch (hba->clk_gating.state) {
+       case CLKS_ON:
+               break;
+       case REQ_CLKS_OFF:
+               if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+                       hba->clk_gating.state = CLKS_ON;
+                       break;
+               }
+               /*
+                * If we are here, it means gating work is either done or
+                * currently running. Hence, fall through to cancel gating
+                * work and to enable clocks.
+                */
+       case CLKS_OFF:
+               scsi_block_requests(hba->host);
+               hba->clk_gating.state = REQ_CLKS_ON;
+               schedule_work(&hba->clk_gating.ungate_work);
+               /*
+                * fall through to check if we should wait for this
+                * work to be done or not.
+                */
+       case REQ_CLKS_ON:
+               if (async) {
+                       rc = -EAGAIN;
+                       hba->clk_gating.active_reqs--;
+                       break;
+               }
+
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               flush_work(&hba->clk_gating.ungate_work);
+               /* Make sure state is CLKS_ON before returning */
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               goto start;
+       default:
+               dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+                               __func__, hba->clk_gating.state);
+               break;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+       return rc;
+}
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                       clk_gating.gate_work.work);
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_gating.is_suspended) {
+               hba->clk_gating.state = CLKS_ON;
+               goto rel_lock;
+       }
+
+       if (hba->clk_gating.active_reqs
+               || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+               || hba->lrb_in_use || hba->outstanding_tasks
+               || hba->active_uic_cmd || hba->uic_async_done)
+               goto rel_lock;
+
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* put the link into hibern8 mode before turning off clocks */
+       if (ufshcd_can_hibern8_during_gating(hba)) {
+               if (ufshcd_uic_hibern8_enter(hba)) {
+                       hba->clk_gating.state = CLKS_ON;
+                       goto out;
+               }
+               ufshcd_set_link_hibern8(hba);
+       }
+
+       if (ufshcd_is_clkscaling_enabled(hba)) {
+               devfreq_suspend_device(hba->devfreq);
+               hba->clk_scaling.window_start_t = 0;
+       }
+
+       if (!ufshcd_is_link_active(hba))
+               ufshcd_setup_clocks(hba, false);
+       else
+               /* If link is active, device ref_clk can't be switched off */
+               __ufshcd_setup_clocks(hba, false, true);
+
+       /*
+        * In case you are here to cancel this work the gating state
+        * would be marked as REQ_CLKS_ON. In this case keep the state
+        * as REQ_CLKS_ON which would anyway imply that clocks are off
+        * and a request to turn them on is pending. By doing it this way,
+        * we keep the state machine intact and this ultimately
+        * prevents doing the cancel work multiple times when there are
+        * new requests arriving before the current cancel work is done.
+        */
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_gating.state == REQ_CLKS_OFF)
+               hba->clk_gating.state = CLKS_OFF;
+
+rel_lock:
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+       return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_clkgating_allowed(hba))
+               return;
+
+       hba->clk_gating.active_reqs--;
+
+       if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+               || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+               || hba->lrb_in_use || hba->outstanding_tasks
+               || hba->active_uic_cmd || hba->uic_async_done)
+               return;
+
+       hba->clk_gating.state = REQ_CLKS_OFF;
+       schedule_delayed_work(&hba->clk_gating.gate_work,
+                       msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       __ufshcd_release(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
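ufshcd_hold()/ufshcd_release() are the reference-counting pair used throughout the rest of this patch to keep the controller clocks on while work is outstanding. A hedged sketch of the calling convention, as it would look inside ufshcd.c (a hypothetical caller, not part of the patch):

/* Hypothetical caller, shown only to illustrate the hold/release pairing. */
static int example_do_work_with_clocks_on(struct ufs_hba *hba)
{
	/*
	 * async == false: wait (possibly sleeping) until the ungate work
	 * has run and clk_gating.state is CLKS_ON.
	 */
	ufshcd_hold(hba, false);

	/* ... issue the UIC / query / device management command here ... */

	/*
	 * Drop the reference. Once active_reqs reaches zero and the host
	 * is idle, gate_work runs after clk_gating.delay_ms and gates the
	 * clocks (optionally putting the link into hibern8 first).
	 */
	ufshcd_release(hba);
	return 0;
}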
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags, value;
+
+       if (kstrtoul(buf, 0, &value))
+               return -EINVAL;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_gating.delay_ms = value;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_clkgating_allowed(hba))
+               return;
+
+       hba->clk_gating.delay_ms = 150;
+       INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+       INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+       hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+       hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+       sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+       hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+       hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_clkgating_allowed(hba))
+               return;
+       device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_clkscaling_enabled(hba))
+               return;
+
+       if (!hba->clk_scaling.is_busy_started) {
+               hba->clk_scaling.busy_start_t = ktime_get();
+               hba->clk_scaling.is_busy_started = true;
+       }
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+       struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+       if (!ufshcd_is_clkscaling_enabled(hba))
+               return;
+
+       if (!hba->outstanding_reqs && scaling->is_busy_started) {
+               scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+                                       scaling->busy_start_t));
+               scaling->busy_start_t = ktime_set(0, 0);
+               scaling->is_busy_started = false;
+       }
+}
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
@@ -421,6 +780,7 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+       ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 }
@@ -576,15 +936,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  * @uic_cmd: UIC command
  *
  * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be called
- * with mutex held.
+ * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
-       int ret;
-       unsigned long flags;
-
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                        "Controller not ready to accept UIC commands\n");
@@ -593,13 +950,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 
        init_completion(&uic_cmd->done);
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_dispatch_uic_cmd(hba, uic_cmd);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
-       return ret;
+       return 0;
 }
 
 /**
@@ -613,11 +966,19 @@ static int
 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
        int ret;
+       unsigned long flags;
 
+       ufshcd_hold(hba, false);
        mutex_lock(&hba->uic_cmd_mutex);
+       spin_lock_irqsave(hba->host->host_lock, flags);
        ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       if (!ret)
+               ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
        mutex_unlock(&hba->uic_cmd_mutex);
 
+       ufshcd_release(hba);
        return ret;
 }
 
@@ -867,6 +1228,32 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        return ret;
 }
 
+/*
+ * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
+ * @scsi_lun: scsi LUN id
+ *
+ * Returns UPIU LUN id
+ */
+static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
+{
+       if (scsi_is_wlun(scsi_lun))
+               return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
+                       | UFS_UPIU_WLUN_ID;
+       else
+               return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @scsi_lun: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+       return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
+
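The two LUN helpers above are pure bit manipulation. A standalone sketch of the round trip, with the constants defined locally (the real UFS_UPIU_MAX_UNIT_NUM_ID, UFS_UPIU_WLUN_ID and SCSI_W_LUN_BASE come from ufs.h/scsi.h; the values below are assumptions):

#include <stdio.h>

/* Assumed values, mirroring what ufs.h / scsi.h are expected to define. */
#define UFS_UPIU_MAX_UNIT_NUM_ID	0x7F
#define UFS_UPIU_WLUN_ID		(1 << 7)
#define SCSI_W_LUN_BASE			0xc100

static unsigned char scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_lun >= SCSI_W_LUN_BASE)	/* well-known LUN? */
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) | UFS_UPIU_WLUN_ID;
	return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

static unsigned short upiu_wlun_to_scsi_wlun(unsigned char upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

int main(void)
{
	/* e.g. the RPMB well-known LUN: UPIU id 0xC4 <-> SCSI W-LUN 0xC144 */
	unsigned char upiu = 0xC4;
	unsigned short scsi = upiu_wlun_to_scsi_wlun(upiu);

	printf("UPIU 0x%02X -> SCSI 0x%04X -> UPIU 0x%02X\n",
	       upiu, scsi, scsi_to_upiu_lun(scsi));
	return 0;
}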
 /**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
@@ -918,6 +1305,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto out;
        }
 
+       err = ufshcd_hold(hba, true);
+       if (err) {
+               err = SCSI_MLQUEUE_HOST_BUSY;
+               clear_bit_unlock(tag, &hba->lrb_in_use);
+               goto out;
+       }
+       WARN_ON(hba->clk_gating.state != CLKS_ON);
+
        lrbp = &hba->lrb[tag];
 
        WARN_ON(lrbp->cmd);
@@ -925,7 +1320,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
        lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
-       lrbp->lun = cmd->device->lun;
+       lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = false;
        lrbp->command_type = UTP_CMD_TYPE_SCSI;
 
@@ -1193,6 +1588,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
        BUG_ON(!hba);
 
+       ufshcd_hold(hba, false);
        mutex_lock(&hba->dev_cmd.lock);
        ufshcd_init_query(hba, &request, &response, opcode, idn, index,
                        selector);
@@ -1236,6 +1632,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
+       ufshcd_release(hba);
        return err;
 }
 
@@ -1259,6 +1656,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 
        BUG_ON(!hba);
 
+       ufshcd_hold(hba, false);
        if (!attr_val) {
                dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
                                __func__, opcode);
@@ -1298,6 +1696,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
 out:
+       ufshcd_release(hba);
        return err;
 }
 
@@ -1325,6 +1724,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 
        BUG_ON(!hba);
 
+       ufshcd_hold(hba, false);
        if (!desc_buf) {
                dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
                                __func__, opcode);
@@ -1374,9 +1774,119 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
 out:
+       ufshcd_release(hba);
        return err;
 }
 
+/**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_param(struct ufs_hba *hba,
+                                 enum desc_idn desc_id,
+                                 int desc_index,
+                                 u32 param_offset,
+                                 u8 *param_read_buf,
+                                 u32 param_size)
+{
+       int ret;
+       u8 *desc_buf;
+       u32 buff_len;
+       bool is_kmalloc = true;
+
+       /* safety checks */
+       if (desc_id >= QUERY_DESC_IDN_MAX)
+               return -EINVAL;
+
+       buff_len = ufs_query_desc_max_size[desc_id];
+       if ((param_offset + param_size) > buff_len)
+               return -EINVAL;
+
+       if (!param_offset && (param_size == buff_len)) {
+               /* memory space already available to hold full descriptor */
+               desc_buf = param_read_buf;
+               is_kmalloc = false;
+       } else {
+               /* allocate memory to hold full descriptor */
+               desc_buf = kmalloc(buff_len, GFP_KERNEL);
+               if (!desc_buf)
+                       return -ENOMEM;
+       }
+
+       ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+                                     desc_id, desc_index, 0, desc_buf,
+                                     &buff_len);
+
+       if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+           (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+            ufs_query_desc_max_size[desc_id])
+           || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+               dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+                       __func__, desc_id, param_offset, buff_len, ret);
+               if (!ret)
+                       ret = -EINVAL;
+
+               goto out;
+       }
+
+       if (is_kmalloc)
+               memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+       if (is_kmalloc)
+               kfree(desc_buf);
+       return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+                                  enum desc_idn desc_id,
+                                  int desc_index,
+                                  u8 *buf,
+                                  u32 size)
+{
+       return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+                                        u8 *buf,
+                                        u32 size)
+{
+       return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+                                             int lun,
+                                             enum unit_desc_param param_offset,
+                                             u8 *param_read_buf,
+                                             u32 param_size)
+{
+       /*
+        * Unit descriptors are only available for general purpose LUs (LUN id
+        * from 0 to 7) and RPMB Well known LU.
+        */
+       if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+               return -EOPNOTSUPP;
+
+       return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+                                     param_offset, param_read_buf, param_size);
+}
+
 /**
  * ufshcd_memory_alloc - allocate memory for host memory space data structures
  * @hba: per adapter instance
@@ -1621,44 +2131,54 @@ out:
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
 /**
- * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
- *                             using DME_SET primitives.
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
+ * state) and waits for it to take effect.
+ *
  * @hba: per adapter instance
- * @mode: powr mode value
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
+ * and device UniPro link, and hence their final completion is indicated by
+ * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
+ * addition to normal UIC command completion Status (UCCS). This function only
+ * returns after the relevant status bits indicate the completion.
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
-       struct uic_command uic_cmd = {0};
-       struct completion pwr_done;
+       struct completion uic_async_done;
        unsigned long flags;
        u8 status;
        int ret;
 
-       uic_cmd.command = UIC_CMD_DME_SET;
-       uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-       uic_cmd.argument3 = mode;
-       init_completion(&pwr_done);
-
        mutex_lock(&hba->uic_cmd_mutex);
+       init_completion(&uic_async_done);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->pwr_done = &pwr_done;
+       hba->uic_async_done = &uic_async_done;
+       ret = __ufshcd_send_uic_cmd(hba, cmd);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-       ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret) {
                dev_err(hba->dev,
-                       "pwr mode change with mode 0x%x uic error %d\n",
-                       mode, ret);
+                       "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+                       cmd->command, cmd->argument3, ret);
+               goto out;
+       }
+       ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+       if (ret) {
+               dev_err(hba->dev,
+                       "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+                       cmd->command, cmd->argument3, ret);
                goto out;
        }
 
-       if (!wait_for_completion_timeout(hba->pwr_done,
+       if (!wait_for_completion_timeout(hba->uic_async_done,
                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
                dev_err(hba->dev,
-                       "pwr mode change with mode 0x%x completion timeout\n",
-                       mode);
+                       "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+                       cmd->command, cmd->argument3);
                ret = -ETIMEDOUT;
                goto out;
        }
@@ -1666,85 +2186,220 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
        status = ufshcd_get_upmcrs(hba);
        if (status != PWR_LOCAL) {
                dev_err(hba->dev,
-                       "pwr mode change failed, host umpcrs:0x%x\n",
-                       status);
+                       "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
+                       cmd->command, status);
                ret = (status != PWR_OK) ? status : -1;
        }
 out:
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->pwr_done = NULL;
+       hba->uic_async_done = NULL;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        mutex_unlock(&hba->uic_cmd_mutex);
+
        return ret;
 }
 
 /**
- * ufshcd_config_max_pwr_mode - Set & Change power mode with
- *     maximum capability attribute information.
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ *                             using DME_SET primitives.
  * @hba: per adapter instance
+ * @mode: power mode value
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-       enum {RX = 0, TX = 1};
-       u32 lanes[] = {1, 1};
-       u32 gear[] = {1, 1};
-       u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+       struct uic_command uic_cmd = {0};
        int ret;
 
-       /* Get the connected lane count */
-       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
-       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+       uic_cmd.command = UIC_CMD_DME_SET;
+       uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+       uic_cmd.argument3 = mode;
+       ufshcd_hold(hba, false);
+       ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       ufshcd_release(hba);
 
-       /*
-        * First, get the maximum gears of HS speed.
-        * If a zero value, it means there is no HSGEAR capability.
-        * Then, get the maximum gears of PWM speed.
-        */
-       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
-       if (!gear[RX]) {
-               ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
-               pwr[RX] = SLOWAUTO_MODE;
-       }
+       return ret;
+}
 
-       ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
-       if (!gear[TX]) {
-               ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
-                                   &gear[TX]);
-               pwr[TX] = SLOWAUTO_MODE;
-       }
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+       struct uic_command uic_cmd = {0};
 
-       /*
-        * Configure attributes for power mode change with below.
-        * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
-        * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
-        * - PA_HSSERIES
-        */
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
-       if (pwr[RX] == FASTAUTO_MODE)
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+       uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
-       if (pwr[TX] == FASTAUTO_MODE)
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+       return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+}
 
-       if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+       struct uic_command uic_cmd = {0};
+       int ret;
 
-       ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
-       if (ret)
-               dev_err(hba->dev,
-                       "pwr_mode: power mode change failed %d\n", ret);
+       uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+       ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       if (ret) {
+               ufshcd_set_link_off(hba);
+               ret = ufshcd_host_reset_and_restore(hba);
+       }
 
        return ret;
 }
 
 /**
- * ufshcd_complete_dev_init() - checks device readiness
- * hba: per-adapter instance
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+       struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+       if (hba->max_pwr_info.is_valid)
+               return 0;
+
+       pwr_info->pwr_tx = FASTAUTO_MODE;
+       pwr_info->pwr_rx = FASTAUTO_MODE;
+       pwr_info->hs_rate = PA_HS_MODE_B;
+
+       /* Get the connected lane count */
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+                       &pwr_info->lane_rx);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+                       &pwr_info->lane_tx);
+
+       if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+               dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+                               __func__,
+                               pwr_info->lane_rx,
+                               pwr_info->lane_tx);
+               return -EINVAL;
+       }
+
+       /*
+        * First, get the maximum gears of HS speed.
+        * If a zero value, it means there is no HSGEAR capability.
+        * Then, get the maximum gears of PWM speed.
+        */
+       ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+       if (!pwr_info->gear_rx) {
+               ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+                               &pwr_info->gear_rx);
+               if (!pwr_info->gear_rx) {
+                       dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+                               __func__, pwr_info->gear_rx);
+                       return -EINVAL;
+               }
+               pwr_info->pwr_rx = SLOWAUTO_MODE;
+       }
+
+       ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+                       &pwr_info->gear_tx);
+       if (!pwr_info->gear_tx) {
+               ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+                               &pwr_info->gear_tx);
+               if (!pwr_info->gear_tx) {
+                       dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+                               __func__, pwr_info->gear_tx);
+                       return -EINVAL;
+               }
+               pwr_info->pwr_tx = SLOWAUTO_MODE;
+       }
+
+       hba->max_pwr_info.is_valid = true;
+       return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+                            struct ufs_pa_layer_attr *pwr_mode)
+{
+       int ret;
+
+       /* if already configured to the requested pwr_mode */
+       if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+           pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+           pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+           pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+           pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+           pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+           pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+               dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+               return 0;
+       }
+
+       /*
+        * Configure attributes for power mode change with below.
+        * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+        * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+        * - PA_HSSERIES
+        */
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+                       pwr_mode->lane_rx);
+       if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+                       pwr_mode->pwr_rx == FAST_MODE)
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+       else
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+                       pwr_mode->lane_tx);
+       if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+                       pwr_mode->pwr_tx == FAST_MODE)
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+       else
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+
+       if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+           pwr_mode->pwr_tx == FASTAUTO_MODE ||
+           pwr_mode->pwr_rx == FAST_MODE ||
+           pwr_mode->pwr_tx == FAST_MODE)
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+                                               pwr_mode->hs_rate);
+
+       ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+                       | pwr_mode->pwr_tx);
+
+       if (ret) {
+               dev_err(hba->dev,
+                       "%s: power mode change failed %d\n", __func__, ret);
+       } else {
+               if (hba->vops && hba->vops->pwr_change_notify)
+                       hba->vops->pwr_change_notify(hba,
+                               POST_CHANGE, NULL, pwr_mode);
+
+               memcpy(&hba->pwr_info, pwr_mode,
+                       sizeof(struct ufs_pa_layer_attr));
+       }
+
+       return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+               struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+       struct ufs_pa_layer_attr final_params = { 0 };
+       int ret;
+
+       if (hba->vops && hba->vops->pwr_change_notify)
+               hba->vops->pwr_change_notify(hba,
+                    PRE_CHANGE, desired_pwr_mode, &final_params);
+       else
+               memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+       ret = ufshcd_change_power_mode(hba, &final_params);
+
+       return ret;
+}
+
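In the probe path the expected sequence is to read the maximum capability once via ufshcd_get_max_pwr_mode() and then hand it to ufshcd_config_pwr_mode(), which lets the variant ops trim the request before programming. A hedged sketch of that sequence (a hypothetical helper, not part of the hunks shown here):

/* Hypothetical helper illustrating the intended call order. */
static void example_scale_up_to_max(struct ufs_hba *hba)
{
	/* Fills and caches hba->max_pwr_info on first use. */
	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev, "Failed getting max supported power mode\n");
		return;
	}

	/* Runs the PRE_CHANGE vendor hook, then programs the UniPro attributes. */
	if (ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info))
		dev_err(hba->dev, "Failed setting power mode\n");
}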
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * hba: per-adapter instance
  *
  * Set fDeviceInit flag and poll until device toggles it.
  */
@@ -1798,11 +2453,10 @@ out:
  * @hba: per adapter instance
  *
  * To bring UFS host controller to operational state,
- * 1. Check if device is present
- * 2. Enable required interrupts
- * 3. Configure interrupt aggregation
- * 4. Program UTRL and UTMRL base addres
- * 5. Configure run-stop-registers
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base addresses
+ * 4. Configure run-stop-registers
  *
  * Returns 0 on success, non-zero value on failure
  */
@@ -1811,14 +2465,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
        int err = 0;
        u32 reg;
 
-       /* check if device present */
-       reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
-       if (!ufshcd_is_device_present(reg)) {
-               dev_err(hba->dev, "cc: Device not present\n");
-               err = -ENXIO;
-               goto out;
-       }
-
        /* Enable required interrupts */
        ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 
@@ -1839,6 +2485,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
         * UCRDY, UTMRLDY and UTRLRDY bits must be 1
         * DEI, HEI bits must be 0
         */
+       reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
        if (!(ufshcd_get_lists_status(reg))) {
                ufshcd_enable_run_stop_reg(hba);
        } else {
@@ -1885,6 +2532,12 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
                msleep(5);
        }
 
+       /* UniPro link is disabled at this point */
+       ufshcd_set_link_off(hba);
+
+       if (hba->vops && hba->vops->hce_enable_notify)
+               hba->vops->hce_enable_notify(hba, PRE_CHANGE);
+
        /* start controller initialization sequence */
        ufshcd_hba_start(hba);
 
@@ -1912,6 +2565,13 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
                }
                msleep(5);
        }
+
+       /* enable UIC related interrupts */
+       ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+       if (hba->vops && hba->vops->hce_enable_notify)
+               hba->vops->hce_enable_notify(hba, POST_CHANGE);
+
        return 0;
 }
 
@@ -1924,16 +2584,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 static int ufshcd_link_startup(struct ufs_hba *hba)
 {
        int ret;
+       int retries = DME_LINKSTARTUP_RETRIES;
 
-       /* enable UIC related interrupts */
-       ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+       do {
+               if (hba->vops && hba->vops->link_startup_notify)
+                       hba->vops->link_startup_notify(hba, PRE_CHANGE);
+
+               ret = ufshcd_dme_link_startup(hba);
+
+               /* check if device is detected by inter-connect layer */
+               if (!ret && !ufshcd_is_device_present(hba)) {
+                       dev_err(hba->dev, "%s: Device not present\n", __func__);
+                       ret = -ENXIO;
+                       goto out;
+               }
+
+               /*
+                * DME link lost indication is only received when link is up,
+                * but we can't be sure if the link is up until link startup
+                * succeeds. So reset the local Uni-Pro and try again.
+                */
+               if (ret && ufshcd_hba_enable(hba))
+                       goto out;
+       } while (ret && retries--);
 
-       ret = ufshcd_dme_link_startup(hba);
        if (ret)
+               /* failed to get the link up... retire */
                goto out;
 
-       ret = ufshcd_make_hba_operational(hba);
+       /* Include any host controller configuration via UIC commands */
+       if (hba->vops && hba->vops->link_startup_notify) {
+               ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
+               if (ret)
+                       goto out;
+       }
 
+       ret = ufshcd_make_hba_operational(hba);
 out:
        if (ret)
                dev_err(hba->dev, "link startup failed %d\n", ret);
@@ -1955,6 +2641,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
        int err = 0;
        int retries;
 
+       ufshcd_hold(hba, false);
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -1966,12 +2653,107 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
                dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
        }
        mutex_unlock(&hba->dev_cmd.lock);
+       ufshcd_release(hba);
 
        if (err)
                dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
        return err;
 }
 
+/**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
+ * value that the host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+       int ret = 0;
+       u8 lun_qdepth;
+       struct ufs_hba *hba;
+
+       hba = shost_priv(sdev->host);
+
+       lun_qdepth = hba->nutrs;
+       ret = ufshcd_read_unit_desc_param(hba,
+                                         ufshcd_scsi_to_upiu_lun(sdev->lun),
+                                         UNIT_DESC_PARAM_LU_Q_DEPTH,
+                                         &lun_qdepth,
+                                         sizeof(lun_qdepth));
+
+       /* Some WLUNs don't support unit descriptors */
+       if (ret == -EOPNOTSUPP)
+               lun_qdepth = 1;
+       else if (!lun_qdepth)
+               /* eventually, we can figure out the real queue depth */
+               lun_qdepth = hba->nutrs;
+       else
+               lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+       dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+                       __func__, lun_qdepth);
+       scsi_activate_tcq(sdev, lun_qdepth);
+}
+
+/*
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success and the b_lu_write_protect status is returned
+ * in the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+                           u8 lun,
+                           u8 *b_lu_write_protect)
+{
+       int ret;
+
+       if (!b_lu_write_protect)
+               ret = -EINVAL;
+       /*
+        * According to UFS device spec, RPMB LU can't be write
+        * protected so skip reading bLUWriteProtect parameter for
+        * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+        */
+       else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+               ret = -ENOTSUPP;
+       else
+               ret = ufshcd_read_unit_desc_param(hba,
+                                         lun,
+                                         UNIT_DESC_PARAM_LU_WR_PROTECT,
+                                         b_lu_write_protect,
+                                         sizeof(*b_lu_write_protect));
+       return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+                                                   struct scsi_device *sdev)
+{
+       if (hba->dev_info.f_power_on_wp_en &&
+           !hba->dev_info.is_lu_power_on_wp) {
+               u8 b_lu_write_protect;
+
+               if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+                                     &b_lu_write_protect) &&
+                   (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+                       hba->dev_info.is_lu_power_on_wp = true;
+       }
+}
+
 /**
  * ufshcd_slave_alloc - handle initial SCSI device configurations
  * @sdev: pointer to SCSI device
@@ -1981,7 +2763,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 static int ufshcd_slave_alloc(struct scsi_device *sdev)
 {
        struct ufs_hba *hba;
-       int lun_qdepth;
 
        hba = shost_priv(sdev->host);
        sdev->tagged_supported = 1;
@@ -1996,16 +2777,10 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
        /* REPORT SUPPORTED OPERATION CODES is not supported */
        sdev->no_report_opcodes = 1;
 
-       lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
-       if (lun_qdepth <= 0)
-               /* eventually, we can figure out the real queue depth */
-               lun_qdepth = hba->nutrs;
-       else
-               lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
 
-       dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
-                       __func__, lun_qdepth);
-       scsi_activate_tcq(sdev, lun_qdepth);
+       ufshcd_set_queue_depth(sdev);
+
+       ufshcd_get_lu_power_on_wp_status(hba, sdev);
 
        return 0;
 }
@@ -2068,6 +2843,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
 
        hba = shost_priv(sdev->host);
        scsi_deactivate_tcq(sdev, hba->nutrs);
+       /* Drop the reference as it won't be needed anymore */
+       if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+               hba->sdev_ufs_device = NULL;
 }
 
 /**
@@ -2234,8 +3012,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
                complete(&hba->active_uic_cmd->done);
        }
 
-       if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
-               complete(hba->pwr_done);
+       if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+               complete(hba->uic_async_done);
 }
 
 /**
@@ -2275,6 +3053,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
                        clear_bit_unlock(index, &hba->lrb_in_use);
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
+                       __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
                        if (hba->dev_cmd.complete)
                                complete(hba->dev_cmd.complete);
@@ -2284,6 +3063,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
        /* clear corresponding bits of completed commands */
        hba->outstanding_reqs ^= completed_reqs;
 
+       ufshcd_clk_scaling_update_busy(hba);
+
        /* we might have free'd some tags above */
        wake_up(&hba->dev_cmd.tag_wq);
 }
@@ -2447,33 +3228,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
 }
 
 /**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
  * @hba: per-adapter instance
+ * @status: bkops_status value
  *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
+ * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to "status" argument passed to
+ * this function, disable otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
  */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+                            enum bkops_status status)
 {
        int err;
-       u32 status = 0;
+       u32 curr_status = 0;
 
-       err = ufshcd_get_bkops_status(hba, &status);
+       err = ufshcd_get_bkops_status(hba, &curr_status);
        if (err) {
                dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
                                __func__, err);
                goto out;
+       } else if (curr_status > BKOPS_STATUS_MAX) {
+               dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+                               __func__, curr_status);
+               err = -EINVAL;
+               goto out;
        }
 
-       status = status & 0xF;
-
-       /* handle only if status indicates performance impact or critical */
-       if (status >= BKOPS_STATUS_PERF_IMPACT)
+       if (curr_status >= status)
                err = ufshcd_enable_auto_bkops(hba);
+       else
+               err = ufshcd_disable_auto_bkops(hba);
 out:
        return err;
 }
 
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
+ * and negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+       return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+}
+
 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 {
        return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -2505,7 +3315,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
        status &= hba->ee_ctrl_mask;
        if (status & MASK_EE_URGENT_BKOPS) {
                err = ufshcd_urgent_bkops(hba);
-               if (err)
+               if (err < 0)
                        dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
                                        __func__, err);
        }
@@ -2530,6 +3340,7 @@ static void ufshcd_err_handler(struct work_struct *work)
        hba = container_of(work, struct ufs_hba, eh_work);
 
        pm_runtime_get_sync(hba->dev);
+       ufshcd_hold(hba, false);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -2583,6 +3394,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 out:
        scsi_unblock_requests(hba->host);
+       ufshcd_release(hba);
        pm_runtime_put_sync(hba->dev);
 }
 
@@ -2766,6 +3578,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
        wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+       ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
        task_req_descp = hba->utmrdl_base_addr;
@@ -2785,7 +3598,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
                                              lun_id, task_tag);
        task_req_upiup->header.dword_1 =
                UPIU_HEADER_DWORD(0, tm_function, 0, 0);
-
+       /*
+        * The host shall provide the same value for LUN field in the basic
+        * header and for Input Parameter.
+        */
        task_req_upiup->input_param1 = cpu_to_be32(lun_id);
        task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
@@ -2814,6 +3630,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        ufshcd_put_tm_slot(hba, free_slot);
        wake_up(&hba->tm_tag_wq);
 
+       ufshcd_release(hba);
        return err;
 }
 
@@ -2896,6 +3713,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        hba = shost_priv(host);
        tag = cmd->request->tag;
 
+       ufshcd_hold(hba, false);
        /* If command is already aborted/completed, return SUCCESS */
        if (!(test_bit(tag, &hba->outstanding_reqs)))
                goto out;
@@ -2960,6 +3778,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 
        clear_bit_unlock(tag, &hba->lrb_in_use);
        wake_up(&hba->dev_cmd.tag_wq);
+
 out:
        if (!err) {
                err = SUCCESS;
@@ -2968,6 +3787,11 @@ out:
                err = FAILED;
        }
 
+       /*
+        * This ufshcd_release() corresponds to the original scsi cmd that got
+        * aborted here (as we won't get any IRQ for it).
+        */
+       ufshcd_release(hba);
        return err;
 }
 
@@ -2984,7 +3808,6 @@ out:
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 {
        int err;
-       async_cookie_t cookie;
        unsigned long flags;
 
        /* Reset the host controller */
@@ -2997,10 +3820,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
                goto out;
 
        /* Establish the link again and restore the device */
-       cookie = async_schedule(ufshcd_async_scan, hba);
-       /* wait for async scan to be completed */
-       async_synchronize_cookie(++cookie);
-       if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+       err = ufshcd_probe_hba(hba);
+
+       if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
                err = -EIO;
 out:
        if (err)
@@ -3022,8 +3844,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 {
        int err = 0;
        unsigned long flags;
+       int retries = MAX_HOST_RESET_RETRIES;
 
-       err = ufshcd_host_reset_and_restore(hba);
+       do {
+               err = ufshcd_host_reset_and_restore(hba);
+       } while (err && --retries);
 
        /*
         * After reset the door-bell might be cleared, complete
@@ -3051,6 +3876,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 
        hba = shost_priv(cmd->device->host);
 
+       ufshcd_hold(hba, false);
        /*
         * Check if there is any race with fatal error handling.
         * If so, wait for it to complete. Even though fatal error
@@ -3084,56 +3910,232 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
        ufshcd_clear_eh_in_progress(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       ufshcd_release(hba);
        return err;
 }
 
 /**
- * ufshcd_read_sdev_qdepth - read the lun command queue depth
- * @hba: Pointer to adapter instance
- * @sdev: pointer to SCSI device
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row in the descriptor table to start the scan from
+ * @buff: power descriptor buffer
+ *
+ * Returns the calculated max ICC level for the specified regulator
+ */
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
+{
+       int i;
+       int curr_uA;
+       u16 data;
+       u16 unit;
+
+       for (i = start_scan; i >= 0; i--) {
+               data = be16_to_cpu(*((u16 *)(buff + 2*i)));
+               unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+                                               ATTR_ICC_LVL_UNIT_OFFSET;
+               curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
+               switch (unit) {
+               case UFSHCD_NANO_AMP:
+                       curr_uA = curr_uA / 1000;
+                       break;
+               case UFSHCD_MILI_AMP:
+                       curr_uA = curr_uA * 1000;
+                       break;
+               case UFSHCD_AMP:
+                       curr_uA = curr_uA * 1000 * 1000;
+                       break;
+               case UFSHCD_MICRO_AMP:
+               default:
+                       break;
+               }
+               if (sup_curr_uA >= curr_uA)
+                       break;
+       }
+       if (i < 0) {
+               i = 0;
+               pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+       }
+
+       return (u32)i;
+}
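/*
 * Editor's sketch, not part of this patch: the loop above walks the power
 * descriptor entries from the highest active ICC level downwards and stops
 * at the first level whose current draw (normalised to uA) fits within the
 * regulator's budget. The standalone helper below illustrates the same
 * selection over a plain array of already-normalised values; the array and
 * its size are illustrative assumptions only.
 */
static u32 pick_max_icc_level(int budget_uA, const int level_uA[], int nr_levels)
{
	int i;

	for (i = nr_levels - 1; i >= 0; i--)
		if (budget_uA >= level_uA[i])
			return i;

	return 0;	/* no level fits, fall back to the lowest one */
}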
+
+/**
+ * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
+ * In case the regulators are not initialized we'll return 0
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from.
+ * @len: length of desc_buf
  *
- * Return in case of success the lun's queue depth else error.
+ * Returns calculated ICC level
  */
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
-                               struct scsi_device *sdev)
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+                                                       u8 *desc_buf, int len)
+{
+       u32 icc_level = 0;
+
+       if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+                                               !hba->vreg_info.vccq2) {
+               dev_err(hba->dev,
+                       "%s: Regulator capability was not set, actvIccLevel=%d",
+                                                       __func__, icc_level);
+               goto out;
+       }
+
+       if (hba->vreg_info.vcc)
+               icc_level = ufshcd_get_max_icc_level(
+                               hba->vreg_info.vcc->max_uA,
+                               POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+                               &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+       if (hba->vreg_info.vccq)
+               icc_level = ufshcd_get_max_icc_level(
+                               hba->vreg_info.vccq->max_uA,
+                               icc_level,
+                               &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+       if (hba->vreg_info.vccq2)
+               icc_level = ufshcd_get_max_icc_level(
+                               hba->vreg_info.vccq2->max_uA,
+                               icc_level,
+                               &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+       return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 {
        int ret;
-       int buff_len = UNIT_DESC_MAX_SIZE;
-       u8 desc_buf[UNIT_DESC_MAX_SIZE];
+       int buff_len = QUERY_DESC_POWER_MAX_SIZE;
+       u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
 
-       ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
-                       QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len);
+       ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+       if (ret) {
+               dev_err(hba->dev,
+                       "%s: Failed reading power descriptor. len = %d ret = %d",
+                       __func__, buff_len, ret);
+               return;
+       }
+
+       hba->init_prefetch_data.icc_level =
+                       ufshcd_find_max_sup_active_icc_level(hba,
+                       desc_buf, buff_len);
+       dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+                       __func__, hba->init_prefetch_data.icc_level);
+
+       ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+                       QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+                       &hba->init_prefetch_data.icc_level);
 
-       if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) {
+       if (ret)
                dev_err(hba->dev,
-                       "%s:Failed reading unit descriptor. len = %d ret = %d"
-                       , __func__, buff_len, ret);
-               if (!ret)
-                       ret = -EINVAL;
+                       "%s: Failed configuring bActiveICCLevel = %d ret = %d",
+                       __func__, hba->init_prefetch_data.icc_level, ret);
+
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ *     "REPORT_LUNS" (address: 01h)
+ *     "UFS Device" (address: 50h)
+ *     "RPMB" (address: 44h)
+ *     "BOOT" (address: 30h)
+ * The UFS device's power management is controlled by the "POWER CONDITION"
+ * field of the SSU (START STOP UNIT) command. But this "power condition"
+ * field will take effect only when it is sent to the "UFS device" well known
+ * logical unit, hence we require the scsi_device instance to represent this
+ * logical unit in order for the UFS host driver to send the SSU command for
+ * power management.
+ *
+ * We also require the scsi_device instance for the "RPMB" (Replay Protected
+ * Memory Block) LU so that a user space process can control this LU. User
+ * space may also want to have access to the BOOT LU.
+ *
+ * This function adds scsi device instances for each of the well known LUs
+ * (except the "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if failed to add any of the required W-LU).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+       int ret = 0;
 
+       hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+               ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+       if (IS_ERR(hba->sdev_ufs_device)) {
+               ret = PTR_ERR(hba->sdev_ufs_device);
+               hba->sdev_ufs_device = NULL;
                goto out;
        }
 
-       ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF;
+       hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
+               ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+       if (IS_ERR(hba->sdev_boot)) {
+               ret = PTR_ERR(hba->sdev_boot);
+               hba->sdev_boot = NULL;
+               goto remove_sdev_ufs_device;
+       }
+
+       hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+               ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+       if (IS_ERR(hba->sdev_rpmb)) {
+               ret = PTR_ERR(hba->sdev_rpmb);
+               hba->sdev_rpmb = NULL;
+               goto remove_sdev_boot;
+       }
+       goto out;
+
+remove_sdev_boot:
+       scsi_remove_device(hba->sdev_boot);
+remove_sdev_ufs_device:
+       scsi_remove_device(hba->sdev_ufs_device);
 out:
        return ret;
 }
 
 /**
- * ufshcd_async_scan - asynchronous execution for link startup
- * @data: data pointer to pass to this function
- * @cookie: cookie data
+ * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
+ *                          ufshcd_scsi_add_wlus()
+ * @hba: per-adapter instance
+ *
  */
-static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
+{
+       if (hba->sdev_ufs_device) {
+               scsi_remove_device(hba->sdev_ufs_device);
+               hba->sdev_ufs_device = NULL;
+       }
+
+       if (hba->sdev_boot) {
+               scsi_remove_device(hba->sdev_boot);
+               hba->sdev_boot = NULL;
+       }
+
+       if (hba->sdev_rpmb) {
+               scsi_remove_device(hba->sdev_rpmb);
+               hba->sdev_rpmb = NULL;
+       }
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
 {
-       struct ufs_hba *hba = (struct ufs_hba *)data;
        int ret;
 
        ret = ufshcd_link_startup(hba);
        if (ret)
                goto out;
 
-       ufshcd_config_max_pwr_mode(hba);
+       /* UniPro link is active now */
+       ufshcd_set_link_active(hba);
 
        ret = ufshcd_verify_dev_init(hba);
        if (ret)
@@ -3143,98 +4145,1045 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
        if (ret)
                goto out;
 
+       /* UFS device is also active now */
+       ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+       hba->wlun_dev_clr_ua = true;
 
-       /* If we are in error handling context no need to scan the host */
-       if (!ufshcd_eh_in_progress(hba)) {
-               scsi_scan_host(hba->host);
-               pm_runtime_put_sync(hba->dev);
+       if (ufshcd_get_max_pwr_mode(hba)) {
+               dev_err(hba->dev,
+                       "%s: Failed getting max supported power mode\n",
+                       __func__);
+       } else {
+               ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+               if (ret)
+                       dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+                                       __func__, ret);
        }
+
+       /*
+        * If we are in error handling context or in power management callbacks
+        * context, no need to scan the host
+        */
+       if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+               bool flag;
+
+               /* clear any previous UFS device information */
+               memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+               if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+                                      QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+                       hba->dev_info.f_power_on_wp_en = flag;
+
+               if (!hba->is_init_prefetch)
+                       ufshcd_init_icc_levels(hba);
+
+               /* Add required well known logical units to scsi mid layer */
+               if (ufshcd_scsi_add_wlus(hba))
+                       goto out;
+
+               scsi_scan_host(hba->host);
+               pm_runtime_put_sync(hba->dev);
+       }
+
+       if (!hba->is_init_prefetch)
+               hba->is_init_prefetch = true;
+
+       /* Resume devfreq after UFS device is detected */
+       if (ufshcd_is_clkscaling_enabled(hba))
+               devfreq_resume_device(hba->devfreq);
+
+out:
+       /*
+        * If we failed to initialize the device or the device is not
+        * present, turn off the power/clocks etc.
+        */
+       if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+               pm_runtime_put_sync(hba->dev);
+               ufshcd_hba_exit(hba);
+       }
+
+       return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+       struct ufs_hba *hba = (struct ufs_hba *)data;
+
+       ufshcd_probe_hba(hba);
+}
+
+static struct scsi_host_template ufshcd_driver_template = {
+       .module                 = THIS_MODULE,
+       .name                   = UFSHCD,
+       .proc_name              = UFSHCD,
+       .queuecommand           = ufshcd_queuecommand,
+       .slave_alloc            = ufshcd_slave_alloc,
+       .slave_configure        = ufshcd_slave_configure,
+       .slave_destroy          = ufshcd_slave_destroy,
+       .change_queue_depth     = ufshcd_change_queue_depth,
+       .eh_abort_handler       = ufshcd_abort,
+       .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+       .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
+       .this_id                = -1,
+       .sg_tablesize           = SG_ALL,
+       .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
+       .can_queue              = UFSHCD_CAN_QUEUE,
+       .max_host_blocked       = 1,
+};
+
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+                                  int ua)
+{
+       int ret = 0;
+       struct regulator *reg = vreg->reg;
+       const char *name = vreg->name;
+
+       BUG_ON(!vreg);
+
+       ret = regulator_set_optimum_mode(reg, ua);
+       if (ret >= 0) {
+               /*
+                * regulator_set_optimum_mode() returns new regulator
+                * mode upon success.
+                */
+               ret = 0;
+       } else {
+               dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
+                               __func__, name, ua, ret);
+       }
+
+       return ret;
+}
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+                                        struct ufs_vreg *vreg)
+{
+       return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+                                        struct ufs_vreg *vreg)
+{
+       return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+               struct ufs_vreg *vreg, bool on)
+{
+       int ret = 0;
+       struct regulator *reg = vreg->reg;
+       const char *name = vreg->name;
+       int min_uV, uA_load;
+
+       BUG_ON(!vreg);
+
+       if (regulator_count_voltages(reg) > 0) {
+               min_uV = on ? vreg->min_uV : 0;
+               ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+               if (ret) {
+                       dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+                                       __func__, name, ret);
+                       goto out;
+               }
+
+               uA_load = on ? vreg->max_uA : 0;
+               ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+               if (ret)
+                       goto out;
+       }
+out:
+       return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+       int ret = 0;
+
+       if (!vreg || vreg->enabled)
+               goto out;
+
+       ret = ufshcd_config_vreg(dev, vreg, true);
+       if (!ret)
+               ret = regulator_enable(vreg->reg);
+
+       if (!ret)
+               vreg->enabled = true;
+       else
+               dev_err(dev, "%s: %s enable failed, err=%d\n",
+                               __func__, vreg->name, ret);
+out:
+       return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+       int ret = 0;
+
+       if (!vreg || !vreg->enabled)
+               goto out;
+
+       ret = regulator_disable(vreg->reg);
+
+       if (!ret) {
+               /* ignore errors on applying disable config */
+               ufshcd_config_vreg(dev, vreg, false);
+               vreg->enabled = false;
+       } else {
+               dev_err(dev, "%s: %s disable failed, err=%d\n",
+                               __func__, vreg->name, ret);
+       }
+out:
+       return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+       int ret = 0;
+       struct device *dev = hba->dev;
+       struct ufs_vreg_info *info = &hba->vreg_info;
+
+       if (!info)
+               goto out;
+
+       ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+       if (ret)
+               goto out;
+
+out:
+       if (ret) {
+               ufshcd_toggle_vreg(dev, info->vccq2, false);
+               ufshcd_toggle_vreg(dev, info->vccq, false);
+               ufshcd_toggle_vreg(dev, info->vcc, false);
+       }
+       return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+       struct ufs_vreg_info *info = &hba->vreg_info;
+
+       if (info)
+               return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+       return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+       int ret = 0;
+
+       if (!vreg)
+               goto out;
+
+       vreg->reg = devm_regulator_get(dev, vreg->name);
+       if (IS_ERR(vreg->reg)) {
+               ret = PTR_ERR(vreg->reg);
+               dev_err(dev, "%s: %s get failed, err=%d\n",
+                               __func__, vreg->name, ret);
+       }
+out:
+       return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+       int ret = 0;
+       struct device *dev = hba->dev;
+       struct ufs_vreg_info *info = &hba->vreg_info;
+
+       if (!info)
+               goto out;
+
+       ret = ufshcd_get_vreg(dev, info->vcc);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_get_vreg(dev, info->vccq);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+       return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+       struct ufs_vreg_info *info = &hba->vreg_info;
+
+       if (info)
+               return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+       return 0;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+                                       bool skip_ref_clk)
+{
+       int ret = 0;
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+       unsigned long flags;
+
+       if (!head || list_empty(head))
+               goto out;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk)) {
+                       if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+                               continue;
+
+                       if (on && !clki->enabled) {
+                               ret = clk_prepare_enable(clki->clk);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+                                               __func__, clki->name, ret);
+                                       goto out;
+                               }
+                       } else if (!on && clki->enabled) {
+                               clk_disable_unprepare(clki->clk);
+                       }
+                       clki->enabled = on;
+                       dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+                                       clki->name, on ? "en" : "dis");
+               }
+       }
+
+       if (hba->vops && hba->vops->setup_clocks)
+               ret = hba->vops->setup_clocks(hba, on);
+out:
+       if (ret) {
+               list_for_each_entry(clki, head, list) {
+                       if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+                               clk_disable_unprepare(clki->clk);
+               }
+       } else if (!ret && on) {
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->clk_gating.state = CLKS_ON;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
+       return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+       return  __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+       int ret = 0;
+       struct ufs_clk_info *clki;
+       struct device *dev = hba->dev;
+       struct list_head *head = &hba->clk_list_head;
+
+       if (!head || list_empty(head))
+               goto out;
+
+       list_for_each_entry(clki, head, list) {
+               if (!clki->name)
+                       continue;
+
+               clki->clk = devm_clk_get(dev, clki->name);
+               if (IS_ERR(clki->clk)) {
+                       ret = PTR_ERR(clki->clk);
+                       dev_err(dev, "%s: %s clk get failed, %d\n",
+                                       __func__, clki->name, ret);
+                       goto out;
+               }
+
+               if (clki->max_freq) {
+                       ret = clk_set_rate(clki->clk, clki->max_freq);
+                       if (ret) {
+                               dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                       __func__, clki->name,
+                                       clki->max_freq, ret);
+                               goto out;
+                       }
+                       clki->curr_freq = clki->max_freq;
+               }
+               dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+                               clki->name, clk_get_rate(clki->clk));
+       }
+out:
+       return ret;
+}
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+       int err = 0;
+
+       if (!hba->vops)
+               goto out;
+
+       if (hba->vops->init) {
+               err = hba->vops->init(hba);
+               if (err)
+                       goto out;
+       }
+
+       if (hba->vops->setup_regulators) {
+               err = hba->vops->setup_regulators(hba, true);
+               if (err)
+                       goto out_exit;
+       }
+
+       goto out;
+
+out_exit:
+       if (hba->vops->exit)
+               hba->vops->exit(hba);
+out:
+       if (err)
+               dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+                       __func__, hba->vops ? hba->vops->name : "", err);
+       return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+       if (!hba->vops)
+               return;
+
+       if (hba->vops->setup_clocks)
+               hba->vops->setup_clocks(hba, false);
+
+       if (hba->vops->setup_regulators)
+               hba->vops->setup_regulators(hba, false);
+
+       if (hba->vops->exit)
+               hba->vops->exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+       int err;
+
+       /*
+        * Handle host controller power separately from the UFS device power
+        * rails as it makes controlling the UFS host controller power
+        * collapse easier, which is different from UFS device power collapse.
+        * Also, enable the host controller power before we go ahead with the
+        * rest of the initialization here.
+        */
+       err = ufshcd_init_hba_vreg(hba);
+       if (err)
+               goto out;
+
+       err = ufshcd_setup_hba_vreg(hba, true);
+       if (err)
+               goto out;
+
+       err = ufshcd_init_clocks(hba);
+       if (err)
+               goto out_disable_hba_vreg;
+
+       err = ufshcd_setup_clocks(hba, true);
+       if (err)
+               goto out_disable_hba_vreg;
+
+       err = ufshcd_init_vreg(hba);
+       if (err)
+               goto out_disable_clks;
+
+       err = ufshcd_setup_vreg(hba, true);
+       if (err)
+               goto out_disable_clks;
+
+       err = ufshcd_variant_hba_init(hba);
+       if (err)
+               goto out_disable_vreg;
+
+       hba->is_powered = true;
+       goto out;
+
+out_disable_vreg:
+       ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+       ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+       ufshcd_setup_hba_vreg(hba, false);
+out:
+       return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+       if (hba->is_powered) {
+               ufshcd_variant_hba_exit(hba);
+               ufshcd_setup_vreg(hba, false);
+               ufshcd_setup_clocks(hba, false);
+               ufshcd_setup_hba_vreg(hba, false);
+               hba->is_powered = false;
+       }
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
+       unsigned char cmd[6] = {REQUEST_SENSE,
+                               0,
+                               0,
+                               0,
+                               SCSI_SENSE_BUFFERSIZE,
+                               0};
+       char *buffer;
+       int ret;
+
+       buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+       if (!buffer) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
+                               SCSI_SENSE_BUFFERSIZE, NULL,
+                               msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+       if (ret)
+               pr_err("%s: failed with err %d\n", __func__, ret);
+
+       kfree(buffer);
+out:
+       return ret;
+}
+
+/**
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ *                          power mode
+ * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
+ *
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
+ */
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+                                    enum ufs_dev_pwr_mode pwr_mode)
+{
+       unsigned char cmd[6] = { START_STOP };
+       struct scsi_sense_hdr sshdr;
+       struct scsi_device *sdp = hba->sdev_ufs_device;
+       int ret;
+
+       if (!sdp || !scsi_device_online(sdp))
+               return -ENODEV;
+
+       /*
+        * If scsi commands fail, the scsi mid-layer schedules scsi error-
+        * handling, which would wait for the host to be resumed. Since we know
+        * we are functional while we are here, skip host resume in error
+        * handling context.
+        */
+       hba->host->eh_noresume = 1;
+       if (hba->wlun_dev_clr_ua) {
+               ret = ufshcd_send_request_sense(hba, sdp);
+               if (ret)
+                       goto out;
+               /* Unit attention condition is cleared now */
+               hba->wlun_dev_clr_ua = false;
+       }
+
+       cmd[4] = pwr_mode << 4;
+
+       /*
+        * This function is generally called from the power management
+        * callbacks, hence set the REQ_PM flag so that it doesn't resume the
+        * already suspended children.
+        */
+       ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+                                    START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+       if (ret) {
+               sdev_printk(KERN_WARNING, sdp,
+                           "START_STOP failed for power mode: %d, result %x\n",
+                           pwr_mode, ret);
+               if (driver_byte(ret) & DRIVER_SENSE) {
+                       scsi_show_sense_hdr(sdp, NULL, &sshdr);
+                       scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq);
+               }
+       }
+
+       if (!ret)
+               hba->curr_dev_pwr_mode = pwr_mode;
+out:
+       hba->host->eh_noresume = 0;
+       return ret;
+}
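/*
 * Editor's note, not part of this patch: in the START STOP UNIT CDB the
 * POWER CONDITION field occupies bits 7:4 of byte 4, which is why the code
 * above does "cmd[4] = pwr_mode << 4". A minimal sketch of building that
 * CDB, assuming the enum ufs_dev_pwr_mode values map directly onto the
 * power conditions the device expects:
 */
static void build_ssu_cdb(unsigned char cdb[6], unsigned int pwr_mode)
{
	int i;

	for (i = 0; i < 6; i++)
		cdb[i] = 0;
	cdb[0] = START_STOP;			/* opcode 0x1b */
	cdb[4] = (pwr_mode & 0xf) << 4;		/* POWER CONDITION, bits 7:4 */
}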
+
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+                                       enum uic_link_state req_link_state,
+                                       int check_for_bkops)
+{
+       int ret = 0;
+
+       if (req_link_state == hba->uic_link_state)
+               return 0;
+
+       if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+               ret = ufshcd_uic_hibern8_enter(hba);
+               if (!ret)
+                       ufshcd_set_link_hibern8(hba);
+               else
+                       goto out;
+       }
+       /*
+        * If autobkops is enabled, link can't be turned off because
+        * turning off the link would also turn off the device.
+        */
+       else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+                  (!check_for_bkops || (check_for_bkops &&
+                   !hba->auto_bkops_enabled))) {
+               /*
+                * Change controller state to "reset state" which
+                * should also put the link in off/reset state
+                */
+               ufshcd_hba_stop(hba);
+               /*
+                * TODO: Check if we need any delay to make sure that
+                * controller is reset
+                */
+               ufshcd_set_link_off(hba);
+       }
+
+out:
+       return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+       /*
+        * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
+        * save some power.
+        *
+        * If the UFS device and link are in OFF state, all power supplies (VCC,
+        * VCCQ, VCCQ2) can be turned off if power on write protect is not
+        * required. If the UFS link is inactive (Hibern8 or OFF state) and the
+        * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
+        *
+        * Ignore the error returned by ufshcd_toggle_vreg() as the device is
+        * anyway in a low power state, which would save some power.
+        */
+       if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+           !hba->dev_info.is_lu_power_on_wp) {
+               ufshcd_setup_vreg(hba, false);
+       } else if (!ufshcd_is_ufs_dev_active(hba)) {
+               ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+               if (!ufshcd_is_link_active(hba)) {
+                       ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+                       ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+               }
+       }
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+       int ret = 0;
+
+       if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+           !hba->dev_info.is_lu_power_on_wp) {
+               ret = ufshcd_setup_vreg(hba, true);
+       } else if (!ufshcd_is_ufs_dev_active(hba)) {
+               ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+               if (!ret && !ufshcd_is_link_active(hba)) {
+                       ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+                       if (ret)
+                               goto vcc_disable;
+                       ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+                       if (ret)
+                               goto vccq_lpm;
+               }
+       }
+       goto out;
+
+vccq_lpm:
+       ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+       ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
 out:
-       return;
+       return ret;
 }
 
-static struct scsi_host_template ufshcd_driver_template = {
-       .module                 = THIS_MODULE,
-       .name                   = UFSHCD,
-       .proc_name              = UFSHCD,
-       .queuecommand           = ufshcd_queuecommand,
-       .slave_alloc            = ufshcd_slave_alloc,
-       .slave_configure        = ufshcd_slave_configure,
-       .slave_destroy          = ufshcd_slave_destroy,
-       .change_queue_depth     = ufshcd_change_queue_depth,
-       .eh_abort_handler       = ufshcd_abort,
-       .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
-       .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
-       .this_id                = -1,
-       .sg_tablesize           = SG_ALL,
-       .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
-       .can_queue              = UFSHCD_CAN_QUEUE,
-};
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+       if (ufshcd_is_link_off(hba))
+               ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+       if (ufshcd_is_link_off(hba))
+               ufshcd_setup_hba_vreg(hba, true);
+}
 
 /**
- * ufshcd_suspend - suspend power management function
+ * ufshcd_suspend - helper function for suspend operations
  * @hba: per adapter instance
- * @state: power state
+ * @pm_op: desired low power operation type
  *
- * Returns -ENOSYS
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both UFS device and UFS link are powered off.
+ *
+ * NOTE: UFS device & link must be active before we enter in this function.
+ *
+ * Returns 0 for success and non-zero for failure
  */
-int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
+       int ret = 0;
+       enum ufs_pm_level pm_lvl;
+       enum ufs_dev_pwr_mode req_dev_pwr_mode;
+       enum uic_link_state req_link_state;
+
+       hba->pm_op_in_progress = 1;
+       if (!ufshcd_is_shutdown_pm(pm_op)) {
+               pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+                        hba->rpm_lvl : hba->spm_lvl;
+               req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+               req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+       } else {
+               req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+               req_link_state = UIC_LINK_OFF_STATE;
+       }
+
+       /*
+        * If we can't transition into any of the low power modes
+        * just gate the clocks.
+        */
+       ufshcd_hold(hba, false);
+       hba->clk_gating.is_suspended = true;
+
+       if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+                       req_link_state == UIC_LINK_ACTIVE_STATE) {
+               goto disable_clks;
+       }
+
+       if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+           (req_link_state == hba->uic_link_state))
+               goto out;
+
+       /* UFS device & link must be active before we enter in this function */
+       if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (ufshcd_is_runtime_pm(pm_op)) {
+               if (ufshcd_can_autobkops_during_suspend(hba)) {
+                       /*
+                        * The device is idle with no requests in the queue,
+                        * allow background operations if bkops status shows
+                        * that performance might be impacted.
+                        */
+                       ret = ufshcd_urgent_bkops(hba);
+                       if (ret)
+                               goto enable_gating;
+               } else {
+                       /* make sure that auto bkops is disabled */
+                       ufshcd_disable_auto_bkops(hba);
+               }
+       }
+
+       if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+            ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+              !ufshcd_is_runtime_pm(pm_op))) {
+               /* ensure that bkops is disabled */
+               ufshcd_disable_auto_bkops(hba);
+               ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+               if (ret)
+                       goto enable_gating;
+       }
+
+       ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+       if (ret)
+               goto set_dev_active;
+
+       ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
        /*
-        * TODO:
-        * 1. Block SCSI requests from SCSI midlayer
-        * 2. Change the internal driver state to non operational
-        * 3. Set UTRLRSR and UTMRLRSR bits to zero
-        * 4. Wait until outstanding commands are completed
-        * 5. Set HCE to zero to send the UFS host controller to reset state
+        * The clock scaling needs access to controller registers. Hence, wait
+        * for pending clock scaling work to be done before clocks are
+        * turned off.
         */
+       if (ufshcd_is_clkscaling_enabled(hba)) {
+               devfreq_suspend_device(hba->devfreq);
+               hba->clk_scaling.window_start_t = 0;
+       }
+       /*
+        * Call vendor specific suspend callback. As these callbacks may access
+        * vendor specific host controller register space, call them while the
+        * host clocks are still ON.
+        */
+       if (hba->vops && hba->vops->suspend) {
+               ret = hba->vops->suspend(hba, pm_op);
+               if (ret)
+                       goto set_link_active;
+       }
 
-       return -ENOSYS;
+       if (hba->vops && hba->vops->setup_clocks) {
+               ret = hba->vops->setup_clocks(hba, false);
+               if (ret)
+                       goto vops_resume;
+       }
+
+       if (!ufshcd_is_link_active(hba))
+               ufshcd_setup_clocks(hba, false);
+       else
+               /* If link is active, device ref_clk can't be switched off */
+               __ufshcd_setup_clocks(hba, false, true);
+
+       hba->clk_gating.state = CLKS_OFF;
+       /*
+        * Disable the host irq as there won't be any host controller
+        * transaction expected till resume.
+        */
+       ufshcd_disable_irq(hba);
+       /* Put the host controller in low power mode if possible */
+       ufshcd_hba_vreg_set_lpm(hba);
+       goto out;
+
+vops_resume:
+       if (hba->vops && hba->vops->resume)
+               hba->vops->resume(hba, pm_op);
+set_link_active:
+       ufshcd_vreg_set_hpm(hba);
+       if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+               ufshcd_set_link_active(hba);
+       else if (ufshcd_is_link_off(hba))
+               ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+       if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+               ufshcd_disable_auto_bkops(hba);
+enable_gating:
+       hba->clk_gating.is_suspended = false;
+       ufshcd_release(hba);
+out:
+       hba->pm_op_in_progress = 0;
+       return ret;
 }
-EXPORT_SYMBOL_GPL(ufshcd_suspend);
 
 /**
- * ufshcd_resume - resume power management function
+ * ufshcd_resume - helper function for resume operations
  * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
  *
- * Returns -ENOSYS
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
+ *
+ * Returns 0 for success and non-zero for failure
  */
-int ufshcd_resume(struct ufs_hba *hba)
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
+       int ret;
+       enum uic_link_state old_link_state;
+
+       hba->pm_op_in_progress = 1;
+       old_link_state = hba->uic_link_state;
+
+       ufshcd_hba_vreg_set_hpm(hba);
+       /* Make sure clocks are enabled before accessing controller */
+       ret = ufshcd_setup_clocks(hba, true);
+       if (ret)
+               goto out;
+
+       /* enable the host irq as host controller would be active soon */
+       ret = ufshcd_enable_irq(hba);
+       if (ret)
+               goto disable_irq_and_vops_clks;
+
+       ret = ufshcd_vreg_set_hpm(hba);
+       if (ret)
+               goto disable_irq_and_vops_clks;
+
+       /*
+        * Call vendor specific resume callback. As these callbacks may access
+        * vendor specific host controller register space, call them when the
+        * host clocks are ON.
+        */
+       if (hba->vops && hba->vops->resume) {
+               ret = hba->vops->resume(hba, pm_op);
+               if (ret)
+                       goto disable_vreg;
+       }
+
+       if (ufshcd_is_link_hibern8(hba)) {
+               ret = ufshcd_uic_hibern8_exit(hba);
+               if (!ret)
+                       ufshcd_set_link_active(hba);
+               else
+                       goto vendor_suspend;
+       } else if (ufshcd_is_link_off(hba)) {
+               ret = ufshcd_host_reset_and_restore(hba);
+               /*
+                * ufshcd_host_reset_and_restore() should have already
+                * set the link state as active
+                */
+               if (ret || !ufshcd_is_link_active(hba))
+                       goto vendor_suspend;
+       }
+
+       if (!ufshcd_is_ufs_dev_active(hba)) {
+               ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+               if (ret)
+                       goto set_old_link_state;
+       }
+
        /*
-        * TODO:
-        * 1. Set HCE to 1, to start the UFS host controller
-        * initialization process
-        * 2. Set UTRLRSR and UTMRLRSR bits to 1
-        * 3. Change the internal driver state to operational
-        * 4. Unblock SCSI requests from SCSI midlayer
+        * If BKOPs operations are urgently needed at this moment then
+        * keep auto-bkops enabled or else disable it.
         */
+       ufshcd_urgent_bkops(hba);
+       hba->clk_gating.is_suspended = false;
+
+       if (ufshcd_is_clkscaling_enabled(hba))
+               devfreq_resume_device(hba->devfreq);
+
+       /* Schedule clock gating in case of no access to UFS device yet */
+       ufshcd_release(hba);
+       goto out;
+
+set_old_link_state:
+       ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+       if (hba->vops && hba->vops->suspend)
+               hba->vops->suspend(hba, pm_op);
+disable_vreg:
+       ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+       ufshcd_disable_irq(hba);
+       ufshcd_setup_clocks(hba, false);
+out:
+       hba->pm_op_in_progress = 0;
+       return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+       int ret = 0;
+
+       if (!hba || !hba->is_powered)
+               goto out;
+
+       if (pm_runtime_suspended(hba->dev)) {
+               if (hba->rpm_lvl == hba->spm_lvl)
+                       /*
+                        * There is a possibility that the device may still
+                        * be in active state during the runtime suspend.
+                        */
+                       if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+                           hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
+                               goto out;
+
+               /*
+                * UFS device and/or UFS link low power states during runtime
+                * suspend seem to be different from what is expected during
+                * system suspend. Hence runtime resume the device & link and
+                * let the system suspend low power states take effect.
+                * TODO: If resume takes longer time, we might have to optimize
+                * it in the future by not resuming everything if possible.
+                */
+               ret = ufshcd_runtime_resume(hba);
+               if (ret)
+                       goto out;
+       }
+
+       ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+       if (!ret)
+               hba->is_sys_suspended = true;
+       return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
 
-       return -ENOSYS;
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+       if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+               /*
+                * Let the runtime resume take care of resuming
+                * if runtime suspended.
+                */
+               return 0;
+
+       return ufshcd_resume(hba, UFS_SYSTEM_PM);
 }
-EXPORT_SYMBOL_GPL(ufshcd_resume);
+EXPORT_SYMBOL(ufshcd_system_resume);
 
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
-       if (!hba)
+       if (!hba || !hba->is_powered)
                return 0;
 
-       /*
-        * The device is idle with no requests in the queue,
-        * allow background operations.
-        */
-       return ufshcd_enable_auto_bkops(hba);
+       return ufshcd_suspend(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. Following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ *    to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So the following would be the possible power state after this function returns
+ * successfully:
+ *     S1: UFS device in Active state with VCC rail ON
+ *         UniPro link in Active state
+ *         All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-       if (!hba)
+       if (!hba || !hba->is_powered)
                return 0;
-
-       return ufshcd_disable_auto_bkops(hba);
+       else
+               return ufshcd_resume(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -3244,6 +5193,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
+/**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function would power off both UFS device and UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+       int ret = 0;
+
+       if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+               goto out;
+
+       if (pm_runtime_suspended(hba->dev)) {
+               ret = ufshcd_runtime_resume(hba);
+               if (ret)
+                       goto out;
+       }
+
+       ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+       if (ret)
+               dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+       /* allow force shutdown even in case of errors */
+       return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
+
 /**
  * ufshcd_remove - de-allocate SCSI host and host memory space
  *             data structure memory
@@ -3252,11 +5231,17 @@ EXPORT_SYMBOL(ufshcd_runtime_idle);
 void ufshcd_remove(struct ufs_hba *hba)
 {
        scsi_remove_host(hba->host);
+       ufshcd_scsi_remove_wlus(hba);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba);
 
        scsi_host_put(hba->host);
+
+       ufshcd_exit_clk_gating(hba);
+       if (ufshcd_is_clkscaling_enabled(hba))
+               devfreq_remove_device(hba->devfreq);
+       ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -3277,19 +5262,16 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
 }
 
 /**
- * ufshcd_init - Driver initialization routine
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
  * @hba_handle: driver private handle
- * @mmio_base: base register address
- * @irq: Interrupt line of device
  * Returns 0 on success, non-zero value on failure
  */
-int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
-                void __iomem *mmio_base, unsigned int irq)
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 {
        struct Scsi_Host *host;
        struct ufs_hba *hba;
-       int err;
+       int err = 0;
 
        if (!dev) {
                dev_err(dev,
@@ -3298,13 +5280,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
                goto out_error;
        }
 
-       if (!mmio_base) {
-               dev_err(dev,
-               "Invalid memory reference for mmio_base is NULL\n");
-               err = -ENODEV;
-               goto out_error;
-       }
-
        host = scsi_host_alloc(&ufshcd_driver_template,
                                sizeof(struct ufs_hba));
        if (!host) {
@@ -3315,9 +5290,146 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
+       *hba_handle = hba;
+
+out_error:
+       return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+
+       if (!head || list_empty(head))
+               goto out;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk)) {
+                       if (scale_up && clki->max_freq) {
+                               if (clki->curr_freq == clki->max_freq)
+                                       continue;
+                               ret = clk_set_rate(clki->clk, clki->max_freq);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                               __func__, clki->name,
+                                               clki->max_freq, ret);
+                                       break;
+                               }
+                               clki->curr_freq = clki->max_freq;
+
+                       } else if (!scale_up && clki->min_freq) {
+                               if (clki->curr_freq == clki->min_freq)
+                                       continue;
+                               ret = clk_set_rate(clki->clk, clki->min_freq);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                               __func__, clki->name,
+                                               clki->min_freq, ret);
+                                       break;
+                               }
+                               clki->curr_freq = clki->min_freq;
+                       }
+               }
+               dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+                               clki->name, clk_get_rate(clki->clk));
+       }
+       if (hba->vops->clk_scale_notify)
+               hba->vops->clk_scale_notify(hba);
+out:
+       return ret;
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+                               unsigned long *freq, u32 flags)
+{
+       int err = 0;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       if (!ufshcd_is_clkscaling_enabled(hba))
+               return -EINVAL;
+
+       if (*freq == UINT_MAX)
+               err = ufshcd_scale_clks(hba, true);
+       else if (*freq == 0)
+               err = ufshcd_scale_clks(hba, false);
+
+       return err;
+}
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+               struct devfreq_dev_status *stat)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+       unsigned long flags;
+
+       if (!ufshcd_is_clkscaling_enabled(hba))
+               return -EINVAL;
+
+       memset(stat, 0, sizeof(*stat));
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (!scaling->window_start_t)
+               goto start_window;
+
+       if (scaling->is_busy_started)
+               scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+                                       scaling->busy_start_t));
+
+       stat->total_time = jiffies_to_usecs((long)jiffies -
+                               (long)scaling->window_start_t);
+       stat->busy_time = scaling->tot_busy_t;
+start_window:
+       scaling->window_start_t = jiffies;
+       scaling->tot_busy_t = 0;
+
+       if (hba->outstanding_reqs) {
+               scaling->busy_start_t = ktime_get();
+               scaling->is_busy_started = true;
+       } else {
+               scaling->busy_start_t = ktime_set(0, 0);
+               scaling->is_busy_started = false;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+       .polling_ms     = 100,
+       .target         = ufshcd_devfreq_target,
+       .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
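/*
 * Editor's sketch, not part of this patch: the "simple_ondemand" governor
 * polls get_dev_status() every polling_ms and derives a load figure roughly
 * as below, then asks target() for a new frequency. Since
 * ufshcd_devfreq_target() only acts on 0 and UINT_MAX, the net effect is a
 * two-point scale between the clocks' min and max rates.
 */
static unsigned int ufs_devfreq_load_percent(const struct devfreq_dev_status *stat)
{
	if (!stat->total_time)
		return 0;

	return (unsigned int)div64_u64(100ULL * stat->busy_time,
				       stat->total_time);
}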
+
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+       int err;
+       struct Scsi_Host *host = hba->host;
+       struct device *dev = hba->dev;
+
+       if (!mmio_base) {
+               dev_err(hba->dev,
+               "Invalid memory reference for mmio_base is NULL\n");
+               err = -ENODEV;
+               goto out_error;
+       }
+
        hba->mmio_base = mmio_base;
        hba->irq = irq;
 
+       err = ufshcd_hba_init(hba);
+       if (err)
+               goto out_error;
+
        /* Read capabilities registers */
        ufshcd_hba_capabilities(hba);
 
@@ -3346,11 +5458,13 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
        host->can_queue = hba->nutrs;
        host->cmd_per_lun = hba->nutrs;
        host->max_id = UFSHCD_MAX_ID;
-       host->max_lun = UFSHCD_MAX_LUNS;
+       host->max_lun = UFS_MAX_LUNS;
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = MAX_CDB_SIZE;
 
+       hba->max_pwr_info.is_valid = false;
+
        /* Initialize wait queue for task management */
        init_waitqueue_head(&hba->tm_wq);
        init_waitqueue_head(&hba->tm_tag_wq);
@@ -3368,24 +5482,27 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
        /* Initialize device management tag acquire wait queue */
        init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
+       ufshcd_init_clk_gating(hba);
        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                dev_err(hba->dev, "request irq failed\n");
-               goto out_disable;
+               goto exit_gating;
+       } else {
+               hba->is_irq_enabled = true;
        }
 
        /* Enable SCSI tag mapping */
        err = scsi_init_shared_tag_map(host, host->can_queue);
        if (err) {
                dev_err(hba->dev, "init shared queue failed\n");
-               goto out_disable;
+               goto exit_gating;
        }
 
        err = scsi_add_host(host, hba->dev);
        if (err) {
                dev_err(hba->dev, "scsi_add_host failed\n");
-               goto out_disable;
+               goto exit_gating;
        }
 
        /* Host controller enable */
@@ -3395,19 +5512,40 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
                goto out_remove_scsi_host;
        }
 
-       *hba_handle = hba;
+       if (ufshcd_is_clkscaling_enabled(hba)) {
+               hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+                                                  "simple_ondemand", NULL);
+               if (IS_ERR(hba->devfreq)) {
+                       dev_err(hba->dev, "Unable to register with devfreq %ld\n",
+                                       PTR_ERR(hba->devfreq));
+                       goto out_remove_scsi_host;
+               }
+               /* Suspend devfreq until the UFS device is detected */
+               devfreq_suspend_device(hba->devfreq);
+               hba->clk_scaling.window_start_t = 0;
+       }
 
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
 
+       /*
+        * The device-initialize-sequence hasn't been invoked yet.
+        * Set the device to power-off state
+        */
+       ufshcd_set_ufs_dev_poweroff(hba);
+
        async_schedule(ufshcd_async_scan, hba);
 
        return 0;
 
 out_remove_scsi_host:
        scsi_remove_host(hba->host);
+exit_gating:
+       ufshcd_exit_clk_gating(hba);
 out_disable:
+       hba->is_irq_enabled = false;
        scsi_host_put(host);
+       ufshcd_hba_exit(hba);
 out_error:
        return err;
 }