/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <linux/devfreq.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
#define ufshcd_toggle_vreg(_dev, _vreg, _on)                 \
	({                                                      \
		int _ret;                                       \
		if (_on)                                        \
			_ret = ufshcd_enable_vreg(_dev, _vreg); \
		else                                            \
			_ret = ufshcd_disable_vreg(_dev, _vreg); \
		_ret;                                           \
	})
static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};
#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		else
			hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
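/*
 * Example (illustrative): ufshcd_clear_cmd() below uses this helper to poll
 * the transfer request doorbell until a tag's bit reads back as zero,
 * checking every 1000us for up to 1000ms:
 *
 *	u32 mask = 1 << tag;
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       mask, ~mask, 1000, 1000);
 *
 * The wait condition only compares the masked bits, so passing ~mask as
 * @val waits for those bits to clear.
 */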
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
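/*
 * Usage sketch (illustrative): the task management issue path sleeps until
 * a slot frees up and returns it once the request completes, along the
 * lines of:
 *
 *	int free_slot;
 *
 *	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 *	... issue the TM request using free_slot ...
 *	ufshcd_put_tm_slot(hba, free_slot);
 */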
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 */
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}
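/*
 * Worked example (using the bit layout above): with UTRLRDY, UTMRLRDY and
 * UCRDY all set (and Device Present possibly set), reg & 0xFF is 0x0E or
 * 0x0F, (0x0E >> 1) == 0x07, and 0x07 ^ 0x07 == 0, i.e. success. A ready
 * bit still clear, or HEI/DEI set, leaves a non-zero result.
 */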
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
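/*
 * Worked example (illustrative): INT_AGGR_TIMEOUT_VAL() is programmed in
 * the 40us units noted for INT_AGGR_DEF_TO above, so the default of 0x02
 * is an 80us aggregation timeout. A typical configuration aggregates up to
 * the full queue depth before forcing an interrupt:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */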
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
				__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and ultimately prevent the
	 * cancel work from running multiple times when new requests
	 * arrive before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
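/*
 * Usage sketch (illustrative): register accesses are bracketed by a
 * hold/release pair so the gate work cannot switch the clocks off while
 * the controller is being touched:
 *
 *	ufshcd_hold(hba, false);	(synchronous: may sleep until CLKS_ON)
 *	... access the controller ...
 *	ufshcd_release(hba);		(may re-arm gate_work after delay_ms)
 *
 * With async == true, ufshcd_hold() returns -EAGAIN instead of sleeping;
 * ufshcd_queuecommand() below uses that form and requeues the command.
 */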
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
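/*
 * The delay_attr attribute set up below appears in sysfs as
 * <device>/clkgate_delay_ms. Illustrative shell usage (the exact path
 * depends on the platform device):
 *
 *	# cat .../clkgate_delay_ms
 *	150
 *	# echo 200 > .../clkgate_delay_ms
 *
 * kstrtoul() is invoked with base 0, so decimal, octal (leading 0) and hex
 * (leading 0x) values are all accepted.
 */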
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that the caller holds the
 * mutex. Must be called with mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size  =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
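/*
 * PRDT encoding note (worked example, illustrative values): the size field
 * holds "byte count - 1", and the 64-bit DMA address is split into two
 * little-endian words. A 4096-byte segment at DMA address 0x123457000
 * would be stored as:
 *
 *	prd_table[i].size       = cpu_to_le32(4096 - 1);
 *	prd_table[i].base_addr  = cpu_to_le32(0x23457000);
 *	prd_table[i].upper_addr = cpu_to_le32(0x1);
 */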
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}
/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}
/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
			ret = -EINVAL;
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufshcd_prepare_utp_nop_upiu(lrbp);
		else
			ret = -EINVAL;
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
			__func__);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
				__func__, lrbp->command_type);
		break;
	} /* end of switch */

	return ret;
}
/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
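/*
 * Worked example (assuming UFS_UPIU_WLUN_ID is bit 7 and SCSI_W_LUN_BASE
 * is 0xc100, per the UFS/SCSI headers): the REPORT LUNS well known LU,
 * SCSI LUN 0xc101, maps to UPIU LUN 0x81 through ufshcd_scsi_to_upiu_lun(),
 * and ufshcd_upiu_wlun_to_scsi_wlun(0x81) maps it back to 0xc101.
 */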
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_upiu(hba, lrbp);
}
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}
static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}
/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
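/*
 * Example (illustrative), reading the fDeviceInit flag, assuming the
 * QUERY_FLAG_IDN_FDEVICEINIT idn from ufs.h:
 *
 *	bool flag_res;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *	if (!err && !flag_res)
 *		... device initialization has completed ...
 */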
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
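/*
 * Example (illustrative), reading the bBackgroundOpStatus attribute,
 * assuming the QUERY_ATTR_IDN_BKOPS_STATUS idn from ufs.h:
 *
 *	u32 bkops_status = 0;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				&bkops_status);
 */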
/**
 * ufshcd_query_descriptor - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: descriptor query opcode
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
static int ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
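/*
 * Example (illustrative), reading the whole device descriptor, assuming the
 * QUERY_DESC_IDN_DEVICE idn and the size constant from ufs.h:
 *
 *	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
 *	int buf_len = QUERY_DESC_DEVICE_MAX_SIZE;
 *	int err;
 *
 *	err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *				      QUERY_DESC_IDN_DEVICE, 0, 0,
 *				      desc_buf, &buf_len);
 *
 * On success, buf_len holds the length the device actually returned.
 */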
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_param(struct ufs_hba *hba,
				  enum desc_idn desc_id,
				  int desc_index,
				  u32 param_offset,
				  u8 *param_read_buf,
				  u32 param_size)
{
	int ret;
	u8 *desc_buf;
	u32 buff_len;
	bool is_kmalloc = true;

	/* safety checks */
	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	buff_len = ufs_query_desc_max_size[desc_id];
	if ((param_offset + param_size) > buff_len)
		return -EINVAL;

	if (!param_offset && (param_size == buff_len)) {
		/* memory space already available to hold full descriptor */
		desc_buf = param_read_buf;
		is_kmalloc = false;
	} else {
		/* allocate memory to hold full descriptor */
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	}

	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
				      desc_id, desc_index, 0, desc_buf,
				      &buff_len);

	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
	     ufs_query_desc_max_size[desc_id])
	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
			__func__, desc_id, param_offset, buff_len, ret);
		if (!ret)
			ret = -EINVAL;

		goto out;
	}

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
static inline int ufshcd_read_desc(struct ufs_hba *hba,
				   enum desc_idn desc_id,
				   int desc_index,
				   u8 *buf,
				   u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
					 u8 *buf,
					 u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}

/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: unit descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
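/*
 * Example (illustrative), reading one field of a unit descriptor, assuming
 * the UNIT_DESC_PARAM_LU_Q_DEPTH offset from ufs.h; ufshcd_read_desc_param()
 * bounces through an internal buffer when only part of the descriptor is
 * requested:
 *
 *	u8 lun_qdepth;
 *	int err;
 *
 *	err = ufshcd_read_unit_desc_param(hba, lun,
 *					  UNIT_DESC_PARAM_LU_Q_DEPTH,
 *					  &lun_qdepth, sizeof(lun_qdepth));
 */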
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *	address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *	and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *	into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
			get, UIC_GET_ATTR_ID(attr_sel), ret);
		goto out;
	}

	if (mib_val)
		*mib_val = uic_cmd.argument3;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
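/*
 * Example (illustrative): callers usually go through the thin
 * ufshcd_dme_set()/ufshcd_dme_get() wrappers from ufshcd.h rather than the
 * *_attr functions directly, e.g. to read and write PA layer attributes:
 *
 *	u32 tx_lanes;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &tx_lanes);
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), tx_lanes);
 */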
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take their effect on both
 * host and device UniPro link and hence their final completion is indicated
 * by dedicated status bits in the Interrupt Status register (UPMS, UHES,
 * UHXS) in addition to the normal UIC command completion Status (UCCS).
 * This function only returns after the relevant status bits indicate the
 * completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	struct completion uic_async_done;
	unsigned long flags;
	u8 status;
	int ret;

	mutex_lock(&hba->uic_cmd_mutex);
	init_completion(&uic_async_done);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->uic_async_done = &uic_async_done;
	ret = __ufshcd_send_uic_cmd(hba, cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}
	ret = ufshcd_wait_for_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->uic_async_done = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

	return ret;
}
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;

	return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
}

static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	if (ret) {
		ufshcd_set_link_off(hba);
		ret = ufshcd_host_reset_and_restore(hba);
	}

	return ret;
}
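/*
 * If the hibern8 exit above fails, the link state can no longer be
 * trusted: the link is marked off and the driver falls back to a full
 * host reset and restore, which re-runs link startup and device
 * initialization from scratch.
 */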
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FASTAUTO_MODE;
	pwr_info->pwr_rx = FASTAUTO_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOWAUTO_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOWAUTO_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		if (hba->vops && hba->vops->pwr_change_notify)
			hba->vops->pwr_change_notify(hba,
				POST_CHANGE, NULL, pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}
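/*
 * Worked example for the PA_PWRMODE value built above: the RX mode goes in
 * bits 7:4 and the TX mode in bits 3:0 of the argument byte. With the
 * UniPro mode values FAST_MODE = 1, SLOW_MODE = 2, FASTAUTO_MODE = 4 and
 * SLOWAUTO_MODE = 5, requesting FASTAUTO in both directions yields
 * (4 << 4) | 4 = 0x44.
 */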
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	if (hba->vops && hba->vops->pwr_change_notify)
		hba->vops->pwr_change_notify(hba,
		     PRE_CHANGE, desired_pwr_mode, &final_params);
	else
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i, retries, err = 0;
	bool flag_res = 1;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* Set the fDeviceInit flag */
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
		if (!err || err == -ETIMEDOUT)
			break;
		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 100 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 100 && !err && flag_res; i++) {
		for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
			err = ufshcd_query_flag(hba,
					UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
			if (!err || err == -ETIMEDOUT)
				break;
			dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
					err);
		}
	}
	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {

		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	if (hba->vops && hba->vops->hce_enable_notify)
		hba->vops->hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	if (hba->vops && hba->vops->hce_enable_notify)
		hba->vops->hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;

	do {
		if (hba->vops && hba->vops->link_startup_notify)
			hba->vops->link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	/* Include any host controller configuration via UIC commands */
	if (hba->vops && hba->vops->link_startup_notify) {
		ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
		if (ret)
			goto out;
	}

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
	return ret;
}
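/*
 * The retry loop above allows up to DME_LINKSTARTUP_RETRIES attempts. A
 * failed attempt may leave the local UniPro stack in an undefined state,
 * so the host controller is re-enabled via ufshcd_hba_enable() before each
 * retry; only when that re-enable itself fails is the loop abandoned.
 */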
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					       NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read bLUQueueDepth value and activate scsi tagged command
 * queueing. For WLUN, queue depth is set to 1. For best-effort
 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
 * value that host can queue.
 */
static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
	int ret = 0;
	u8 lun_qdepth;
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	lun_qdepth = hba->nutrs;
	ret = ufshcd_read_unit_desc_param(hba,
					  ufshcd_scsi_to_upiu_lun(sdev->lun),
					  UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth,
					  sizeof(lun_qdepth));

	/* Some WLUN doesn't support unit descriptor */
	if (ret == -EOPNOTSUPP)
		lun_qdepth = 1;
	else if (!lun_qdepth)
		/* eventually, we can figure out the real queue depth */
		lun_qdepth = hba->nutrs;
	else
		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);

	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
			__func__, lun_qdepth);
	scsi_activate_tcq(sdev, lun_qdepth);
}
/*
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success and the write protect status is returned in
 * the @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
static int ufshcd_get_lu_wp(struct ufs_hba *hba,
			    u8 lun,
			    u8 *b_lu_write_protect)
{
	int ret;

	if (!b_lu_write_protect)
		ret = -EINVAL;
	/*
	 * According to UFS device spec, RPMB LU can't be write
	 * protected so skip reading bLUWriteProtect parameter for
	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
	 */
	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
		ret = -ENOTSUPP;
	else
		ret = ufshcd_read_unit_desc_param(hba,
					  lun,
					  UNIT_DESC_PARAM_LU_WR_PROTECT,
					  b_lu_write_protect,
					  sizeof(*b_lu_write_protect));
	return ret;
}
/**
 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect status
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 *
 */
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
						    struct scsi_device *sdev)
{
	if (hba->dev_info.f_power_on_wp_en &&
	    !hba->dev_info.is_lu_power_on_wp) {
		u8 b_lu_write_protect;

		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
				      &b_lu_write_protect) &&
		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
			hba->dev_info.is_lu_power_on_wp = true;
	}
}
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	sdev->tagged_supported = 1;

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;
	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	ufshcd_set_queue_depth(sdev);

	ufshcd_get_lu_power_on_wp_status(hba, sdev);

	return 0;
}
/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 * @reason: reason for changing the depth
 *
 * Change queue depth according to the reason and make sure
 * the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev,
		int depth, int reason)
{
	struct ufs_hba *hba = shost_priv(sdev->host);

	if (depth > hba->nutrs)
		depth = hba->nutrs;

	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		if (!sdev->tagged_supported)
			depth = 1;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return depth;
}
/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);

	return 0;
}
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	scsi_deactivate_tcq(sdev, hba->nutrs);
	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
		hba->sdev_ufs_device = NULL;
}
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ocs_value;
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
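/*
 * The result word composed above follows the SCSI midlayer convention:
 * byte 2 carries the host byte (e.g. DID_OK << 16), byte 1 the message
 * byte (COMMAND_COMPLETE << 8) and byte 0 the SCSI status itself, so
 * DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD denotes a fully
 * successful completion.
 */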
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
		complete(hba->uic_async_done);
}
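/*
 * Two completion paths are serviced above: UIC_COMMAND_COMPL (UCCS)
 * completes the synchronous command tracked in hba->active_uic_cmd, while
 * the power-mode related bits in UFSHCD_UIC_PWR_MASK complete the separate
 * hba->uic_async_done completion that ufshcd_uic_pwr_ctrl() waits on.
 */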
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long completed_reqs;
	u32 tr_doorbell;
	int result;
	int index;

	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	ufshcd_reset_intr_aggr(hba);

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			scsi_dma_unmap(cmd);
			cmd->result = result;
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			clear_bit_unlock(index, &hba->lrb_in_use);
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
			__ufshcd_release(hba);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
			if (hba->dev_cmd.complete)
				complete(hba->dev_cmd.complete);
		}
	}

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	ufshcd_clk_scaling_update_busy(hba);

	/* we might have free'd some tags above */
	wake_up(&hba->dev_cmd.tag_wq);
}
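/*
 * The XOR above works because a bit in outstanding_reqs is set by software
 * on submission and the matching doorbell bit is cleared by hardware on
 * completion: tr_doorbell ^ outstanding_reqs therefore yields exactly the
 * set of slots that have completed since the last invocation.
 */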
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}
/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	hba->auto_bkops_enabled = false;
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPS is enabled, this function returns 0, 1 if the bkops is not
 * enabled, and negative error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;
	if (status & MASK_EE_URGENT_BKOPS) {
		err = ufshcd_urgent_bkops(hba);
		if (err < 0)
			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
					__func__, err);
	}
out:
	pm_runtime_put_sync(hba->dev);
	return;
}
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto out;
	}

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
		if (ufshcd_clear_cmd(hba, tag))
			err_xfer |= 1 << tag;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
		if (ufshcd_clear_tm_cmd(hba, tag))
			err_tm |= 1 << tag;

	/* Complete the requests that are cleared by s/w */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Fatal errors need reset */
	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
			((hba->saved_err & UIC_ERROR) &&
			(hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
		err = ufshcd_reset_and_restore(hba);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}
	ufshcd_clear_eh_in_progress(hba);

out:
	scsi_unblock_requests(hba->host);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			/* transfer error masks to sticky bits */
			hba->saved_err |= hba->errors;
			hba->saved_uic_err |= hba->uic_error;

			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		ufshcd_sl_intr(hba, intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);
out:
	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
					      lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);
	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	ufshcd_release(hba);
	return err;
}
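/*
 * Note the task_tag computed above as hba->nutrs + free_slot: offsetting
 * task management tags by the transfer request queue depth keeps them
 * disjoint from transfer request tags, so a UPIU task tag uniquely
 * identifies either a transfer slot or a TM slot.
 */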
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                    scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be a race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	ufshcd_hold(hba, false);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs)))
		goto out;

	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	lrbp = &hba->lrb[tag];
	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			goto out;
		} else {
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp; /* service response error */
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}

	/*
	 * This ufshcd_release() corresponds to the original scsi cmd that got
	 * aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	err = ufshcd_probe_hba(hba);

	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
		err = -EIO;
out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);

	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	do {
		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	ufshcd_hold(hba, false);
	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpu(*((u16 *)(buff + 2*i)));
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
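/*
 * Worked example for the decoding above: each 16-bit descriptor entry
 * carries a unit field and a current value; an entry whose unit decodes to
 * UFSHCD_MILI_AMP with a value field of 50 means 50 mA, which the switch
 * normalizes to 50000 uA before comparing against sup_curr_uA. The scan
 * walks from the highest ICC level downwards and returns the first level
 * the regulator can actually supply.
 */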
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
							u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
						!hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
							__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor. len = %d ret = %d",
			__func__, buff_len, ret);
		return;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
			&hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when it's sent to the "UFS device" well known logical
 * unit, hence we require the scsi_device instance to represent this logical
 * unit in order for the UFS host driver to send the SSU command for power
 * management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of the above well known
 * LUs (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}

	hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(hba->sdev_boot)) {
		ret = PTR_ERR(hba->sdev_boot);
		hba->sdev_boot = NULL;
		goto remove_sdev_ufs_device;
	}

	hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(hba->sdev_rpmb)) {
		ret = PTR_ERR(hba->sdev_rpmb);
		hba->sdev_rpmb = NULL;
		goto remove_sdev_boot;
	}
	goto out;

remove_sdev_boot:
	scsi_remove_device(hba->sdev_boot);
remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}
/**
 * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
 * ufshcd_scsi_add_wlus()
 * @hba: per-adapter instance
 *
 */
static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
{
	if (hba->sdev_ufs_device) {
		scsi_remove_device(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
	}

	if (hba->sdev_boot) {
		scsi_remove_device(hba->sdev_boot);
		hba->sdev_boot = NULL;
	}

	if (hba->sdev_rpmb) {
		scsi_remove_device(hba->sdev_rpmb);
		hba->sdev_rpmb = NULL;
	}
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	hba->wlun_dev_clr_ua = true;

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret)
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
					__func__, ret);
	}

	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
	 */
	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		bool flag;

		/* clear any previous UFS device information */
		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
		if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
			hba->dev_info.f_power_on_wp_en = flag;

		if (!hba->is_init_prefetch)
			ufshcd_init_icc_levels(hba);

		/* Add required well known logical units to scsi mid layer */
		if (ufshcd_scsi_add_wlus(hba))
			goto out;

		scsi_scan_host(hba->host);
		pm_runtime_put_sync(hba->dev);
	}

	if (!hba->is_init_prefetch)
		hba->is_init_prefetch = true;

	/* Resume devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}

	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;

	ufshcd_probe_hba(hba);
}
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_host_blocked	= 1,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
		int ua)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;

	BUG_ON(!vreg);

	ret = regulator_set_optimum_mode(reg, ua);
	if (ret >= 0) {
		/*
		 * regulator_set_optimum_mode() returns new regulator
		 * mode upon success.
		 */
		ret = 0;
	} else {
		dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
				__func__, name, ua, ret);
	}

	return ret;
}

static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;
	int min_uV, uA_load;

	BUG_ON(!vreg);

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
					__func__, name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}

static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}

static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);

	return 0;
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_get_vreg(hba->dev, info->vdd_hba);

	return 0;
}
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
					bool skip_ref_clk)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
				continue;

			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	if (hba->vops && hba->vops->setup_clocks)
		ret = hba->vops->setup_clocks(hba, on);
out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
	return ret;
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}
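/*
 * Illustrative only: the skip_ref_clk parameter of __ufshcd_setup_clocks()
 * matters on the suspend path. When the UniPro link stays active, the
 * device reference clock must keep running, so the suspend code (see
 * ufshcd_suspend() later in this file) gates everything except "ref_clk":
 *
 *	if (!ufshcd_is_link_active(hba))
 *		ufshcd_setup_clocks(hba, false);	 (gate all clocks)
 *	else
 *		__ufshcd_setup_clocks(hba, false, true); (keep ref_clk on)
 */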
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
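/*
 * Illustrative only: ufshcd_init_clocks() expects hba->clk_list_head to
 * have been populated by the bus glue (typically from platform data or
 * device tree) before ufshcd_init() runs. A hypothetical entry, assuming
 * the struct ufs_clk_info fields used above, might be set up as:
 *
 *	struct ufs_clk_info *clki;
 *
 *	clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
 *	clki->name = "ref_clk";
 *	clki->min_freq = 19200000;
 *	clki->max_freq = 19200000;
 *	list_add_tail(&clki->list, &hba->clk_list_head);
 *
 * The rates shown are example values only; real rates would come from the
 * platform description parsed by the glue driver.
 */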
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	if (hba->vops->init) {
		err = hba->vops->init(hba);
		if (err)
			goto out;
	}

	if (hba->vops->setup_regulators) {
		err = hba->vops->setup_regulators(hba, true);
		if (err)
			goto out_exit;
	}

	goto out;

out_exit:
	if (hba->vops->exit)
		hba->vops->exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, hba->vops ? hba->vops->name : "", err);
	return err;
}
static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	if (hba->vops->setup_clocks)
		hba->vops->setup_clocks(hba, false);

	if (hba->vops->setup_regulators)
		hba->vops->setup_regulators(hba, false);

	if (hba->vops->exit)
		hba->vops->exit(hba);
}
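/*
 * Illustrative only: a vendor driver hooks into the init/exit sequence
 * above by providing a struct ufs_hba_variant_ops. A hypothetical variant
 * (all foo_* names are placeholders) might look like:
 *
 *	static struct ufs_hba_variant_ops foo_ufs_hba_vops = {
 *		.name             = "foo-ufs",
 *		.init             = foo_ufs_init,
 *		.exit             = foo_ufs_exit,
 *		.setup_clocks     = foo_ufs_setup_clocks,
 *		.setup_regulators = foo_ufs_setup_regulators,
 *	};
 *
 * Every callback is optional; ufshcd_variant_hba_init() and
 * ufshcd_variant_hba_exit() check each pointer before calling it.
 */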
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails, as this makes it easy to control host controller power
	 * collapse independently of UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				SCSI_SENSE_BUFFERSIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
				SCSI_SENSE_BUFFERSIZE, NULL,
				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				     enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp = hba->sdev_ufs_device;
	int ret;

	if (!sdp || !scsi_device_online(sdp))
		return -ENODEV;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the REQ_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d\n", pwr_mode);
		scsi_show_result(ret);
		if (driver_byte(ret) & DRIVER_SENSE) {
			scsi_show_sense_hdr(&sshdr);
			scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
		}
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	hba->host->eh_noresume = 0;
	return ret;
}
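/*
 * Illustrative only: "cmd[4] = pwr_mode << 4" above places the requested
 * mode in the POWER CONDITION field (bits 7:4 of CDB byte 4) of the SCSI
 * START STOP UNIT command. Assuming the usual UFS encoding where
 * UFS_SLEEP_PWR_MODE == 2, requesting sleep would therefore yield:
 *
 *	cmd[4] = 2 << 4;	becomes 0x20
 */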
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
	 * save some power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is
	 * anyway in low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba,
						     hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link is powered off.
 *
 * NOTE: UFS device & link must be active before we enter in this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto out;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto out;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	       !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * The clock scaling needs access to controller registers. Hence, wait
	 * for pending clock scaling work to be done before the clocks are
	 * turned off.
	 */
	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space, call them while the
	 * host clocks are still ON.
	 */
	if (hba->vops && hba->vops->suspend) {
		ret = hba->vops->suspend(hba, pm_op);
		if (ret)
			goto set_link_active;
	}

	if (hba->vops && hba->vops->setup_clocks) {
		ret = hba->vops->setup_clocks(hba, false);
		if (ret)
			goto vops_resume;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

vops_resume:
	if (hba->vops && hba->vops->resume)
		hba->vops->resume(hba, pm_op);
set_link_active:
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}
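/*
 * Illustrative only: the rpm_lvl/spm_lvl handling above resolves a PM level
 * to a (device power mode, link state) pair via
 * ufs_get_pm_lvl_to_dev_pwr_mode() / ufs_get_pm_lvl_to_link_pwr_state().
 * Assuming the conventional six-level table used for UFS PM (an assumption
 * stated here, not restated from this file), the mapping would be:
 *
 *	lvl 0: UFS_ACTIVE_PWR_MODE    + UIC_LINK_ACTIVE_STATE
 *	lvl 1: UFS_ACTIVE_PWR_MODE    + UIC_LINK_HIBERN8_STATE
 *	lvl 2: UFS_SLEEP_PWR_MODE     + UIC_LINK_ACTIVE_STATE
 *	lvl 3: UFS_SLEEP_PWR_MODE     + UIC_LINK_HIBERN8_STATE
 *	lvl 4: UFS_POWERDOWN_PWR_MODE + UIC_LINK_HIBERN8_STATE
 *	lvl 5: UFS_POWERDOWN_PWR_MODE + UIC_LINK_OFF_STATE
 *
 * so the two helpers simply index such a table by pm_lvl.
 */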
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	if (hba->vops && hba->vops->resume) {
		ret = hba->vops->resume(hba, pm_op);
		if (ret)
			goto disable_vreg;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	/*
	 * If BKOPs operations are urgently needed at this moment then
	 * keep auto-bkops enabled or else disable it.
	 */
	ufshcd_urgent_bkops(hba);
	hba->clk_gating.is_suspended = false;

	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	if (hba->vops && hba->vops->suspend)
		hba->vops->suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}
/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba || !hba->is_powered)
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		if (hba->rpm_lvl == hba->spm_lvl)
			/*
			 * There is possibility that device may still be in
			 * active state during the runtime suspend.
			 */
			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
				goto out;

		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend may differ from what is expected during system
		 * suspend. Hence runtime resume the device & link and let the
		 * system suspend low power states take effect.
		 * TODO: If resume takes longer, we might optimize it in the
		 * future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		return 0;

	return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
EXPORT_SYMBOL(ufshcd_system_resume);
/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state.
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So, the following would be the possible power state after this function
 * returns:
 *
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
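/*
 * Illustrative only: a bus glue driver typically forwards its dev_pm_ops
 * to the ufshcd_* helpers exported above. A hypothetical wiring, with
 * foo_ufs_* as placeholder wrappers that fetch the hba from drvdata:
 *
 *	static const struct dev_pm_ops foo_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_ufs_suspend, foo_ufs_resume)
 *		SET_RUNTIME_PM_OPS(foo_ufs_runtime_suspend,
 *				   foo_ufs_runtime_resume,
 *				   foo_ufs_runtime_idle)
 *	};
 *
 * where, e.g., foo_ufs_runtime_suspend() would simply call
 * ufshcd_runtime_suspend(dev_get_drvdata(dev)).
 */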
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		   data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	ufshcd_scsi_remove_wlus(hba);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	scsi_host_put(hba->host);

	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_remove_device(hba->devfreq);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
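/*
 * Illustrative only: a minimal probe in a bus glue driver (loosely modeled
 * on the platform glue) would pair ufshcd_alloc_host() with ufshcd_init()
 * roughly as follows. Error handling is elided and all foo_* names are
 * placeholders:
 *
 *	static int foo_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba;
 *		void __iomem *mmio_base;
 *		struct resource *res;
 *		int irq, err;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		mmio_base = devm_ioremap_resource(&pdev->dev, res);
 *		irq = platform_get_irq(pdev, 0);
 *
 *		err = ufshcd_alloc_host(&pdev->dev, &hba);
 *		if (err)
 *			return err;
 *		err = ufshcd_init(hba, mmio_base, irq);
 *		if (err)
 *			return err;
 *		platform_set_drvdata(pdev, hba);
 *		return 0;
 *	}
 */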
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
	/* check hba->vops as well: not every host has variant ops */
	if (hba->vops && hba->vops->clk_scale_notify)
		hba->vops->clk_scale_notify(hba);

out:
	return ret;
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int err = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	if (*freq == UINT_MAX)
		err = ufshcd_scale_clks(hba, true);
	else if (*freq == 0)
		err = ufshcd_scale_clks(hba, false);

	return err;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};
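/*
 * Illustrative only: with the "simple_ondemand" governor (requested in
 * ufshcd_init() below), devfreq periodically calls
 * ufshcd_devfreq_get_dev_status() and derives a load figure of roughly
 * busy_time / total_time from the returned stats. It then calls
 * ufshcd_devfreq_target() with a frequency request that the handler above
 * interprets coarsely: UINT_MAX means scale all clocks up, 0 means scale
 * them down.
 */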
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);
	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto exit_gating;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						   "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
					PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * The device-initialize-sequence hasn't been invoked yet.
	 * Set the device to power-off state
	 */
	ufshcd_set_ufs_dev_poweroff(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	scsi_host_put(host);
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
5552 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
5553 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
5554 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
5555 MODULE_LICENSE("GPL");
5556 MODULE_VERSION(UFSHCD_DRIVER_VERSION
);