/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_REQ_SENSE_SIZE	18
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
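/*
 * For reference: the aggregation timeout field counts in 40us units, so
 * the default INT_AGGR_DEF_TO of 0x02 corresponds to 2 * 40us = 80us of
 * inactivity before an aggregated completion interrupt is raised.
 */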
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
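/*
 * For illustration: with the table above, asking for
 * (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) walks the entries in order
 * and returns index 3, i.e. the fourth power management level. Any
 * (dev_state, link_state) pair that is not listed falls back to level 0
 * (device active, link active).
 */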
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		else
			hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled())
		return;

	lrbp = &hba->lrb[tag];

	if (lrbp->cmd) { /* data phase exists */
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				  lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
	int i;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
	}
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platform, as the printed
	 * values may be from cache and not the most recent value.
	 * To know whether you are looking at an un-cached version verify
	 * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
	 * during platform/pci probe function.
	 */
	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
				tag);
		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
				tag);
		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
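/*
 * Typical use (a sketch, not lifted from this file): poll the transfer
 * request doorbell until a tag's bit clears, sleeping between reads:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *			1 << tag, 0, 1000, 1000, true);
 *
 * i.e. wait up to 1000 ms, polling every 1000 us, for bit 'tag' to read
 * back as zero.
 */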
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}
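/*
 * Usage pattern (a sketch): a task management caller pairs this with
 * ufshcd_put_tm_slot() once the request completes:
 *
 *	int free_slot;
 *
 *	if (!ufshcd_get_tm_free_slot(hba, &free_slot))
 *		return -EBUSY;	// no TM slot available right now
 *	... issue the TM request using 'free_slot' ...
 *	ufshcd_put_tm_slot(hba, free_slot);
 */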
static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 * 4-7		reserved
	 */
	return ((reg & 0xFF) >> 1) ^ 0x07;
}
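/*
 * Worked example: when UTRLRDY, UTMRLRDY and UCRDY are all set (the
 * device-present bit is shifted out), reg & 0xFF = 0x0F,
 * (0x0F >> 1) = 0x07, and 0x07 ^ 0x07 = 0 -- success. If, say, UCRDY is
 * still clear, the result is 0x03 ^ 0x07 = 0x04, a positive failure value.
 */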
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
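/*
 * For reference: the UPIU header is big-endian, so after be32_to_cpu()
 * the transaction type sits in the top byte of dword_0. E.g. a response
 * UPIU whose first header byte on the wire is 0x21
 * (UPIU_TRANSACTION_RESPONSE) yields 0x21 from the >> 24.
 */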
/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
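/*
 * For illustration: a typical configuration forces an aggregated interrupt
 * after either (nutrs - 1) completed requests or INT_AGGR_DEF_TO (0x02,
 * i.e. 80us) of inactivity, whichever comes first, e.g.:
 *
 *	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 */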
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

static const char *ufschd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	default:			return "UNKNOWN";
	}
}

static const char *ufschd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	default:			return "UNKNOWN";
	}
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (!head || list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);

	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	scsi_block_requests(hba->host);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		scsi_unblock_requests(hba->host);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	scsi_unblock_requests(hba->host);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	if ((*freq > 0) && (*freq < UINT_MAX)) {
		dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
		return -EINVAL;
	}

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	scale_up = (*freq == UINT_MAX) ? true : false;
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}


static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
				__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state == REQ_CLKS_ON)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
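/*
 * Usage note (a sketch): every code path that touches the device brackets
 * its work with this pair so the gating bookkeeping stays balanced:
 *
 *	ufshcd_hold(hba, false);	// clocks guaranteed on from here
 *	... issue commands / access registers ...
 *	ufshcd_release(hba);		// may arm the delayed gate_work
 */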
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
		return;

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = jiffies;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	hba->lrb[task_tag].issue_time_stamp = ktime_get();
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
	ufshcd_add_command_trace(hba, task_tag, "send");
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
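/*
 * Worked example (register value made up for illustration): a capabilities
 * register reading 0x0707001F encodes NUTRS in its low bits
 * (0x1F -> 31, i.e. 32 transfer request slots after the +1) and NUTMRS
 * in bits 18:16 (0x7 -> 7, i.e. 8 task management slots after the +1).
 */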
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
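/*
 * For reference: UPMCRS lives in HCS bits 10:8, so e.g. an HCS value of
 * 0x0000010F (made up for illustration) yields (0x10F >> 8) & 0x7 = 1,
 * i.e. PWR_LOCAL, meaning the power mode change was applied on the
 * local side.
 */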
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_command: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except it does not take the mutex
 * itself. Must be called with mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16)(sg_segments *
					sizeof(struct ufshcd_sg_entry)));
		else
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size  =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
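/*
 * For reference: the PRDT encodes each segment length as (byte count - 1),
 * so a 4 KiB scatterlist element is stored as 4095. Splitting the 64-bit
 * DMA address into base_addr/upper_addr keeps the descriptor layout fixed
 * on both 32- and 64-bit hosts.
 */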
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
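/*
 * Note on the version 1.0 path: INTERRUPT_MASK_RW_VER_10 selects the
 * read/write bits that must be carried over unchanged, while
 * '(set ^ intrs) & intrs' evaluates to exactly the requested bits that
 * are not already set. For example, set = 0x0002 with intrs = 0x0001
 * yields rw | 0x0001, i.e. bit 0 newly enabled on top of the preserved
 * rw bits.
 */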
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
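/*
 * For UFSHCI 1.0 hosts, the bits in INTERRUPT_MASK_RW_VER_10 of the
 * INTERRUPT_ENABLE register need the read-modify-write treatment seen in
 * both helpers above so their current state is preserved; newer host
 * controller versions can simply OR in (or mask out) the requested
 * interrupt bits.
 */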
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}
/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if (hba->ufs_version == UFSHCI_VERSION_20)
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_SCSI;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}
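/*
 * Example of the mapping above: SCSI addresses well-known LUs starting
 * at SCSI_W_LUN_BASE (0xc100), while UPIU marks them with the
 * UFS_UPIU_WLUN_ID bit (BIT(7)). So SCSI W-LUN 0xc104 becomes UPIU LUN
 * 0x84, whereas an ordinary LUN 2 maps straight through to UPIU LUN 2.
 */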
/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	if (!down_read_trylock(&hba->clk_scaling_lock))
		return SCSI_MLQUEUE_HOST_BUSY;

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED:
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}

	/* if error handling is in progress, don't issue commands */
	if (ufshcd_eh_in_progress(hba)) {
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	hba->req_abort_count = 0;

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	up_read(&hba->clk_scaling_lock);
	return err;
}
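/*
 * Issue-path summary for ufshcd_queuecommand(): take clk_scaling_lock so
 * clock scaling cannot run concurrently with command issue, reserve the
 * request tag in hba->lrb_in_use so device management commands cannot
 * grab it, un-gate the clocks via ufshcd_hold(), compose the UPIU and
 * map the scatter-gather list, then ring the doorbell under host_lock.
 * The wmb() ensures the descriptors are visible to the controller before
 * the doorbell write.
 */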
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_comp_devman_upiu(hba, lrbp);
}
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000, true);

	return err;
}
static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
		 * in case of an error, after clearing the doorbell,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
	}

	return err;
}
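/*
 * Note on the timeout path above: -ETIMEDOUT is converted to -EAGAIN
 * only when the doorbell bit was successfully cleared, i.e. the command
 * was safely retired without completing, so the caller may retry. If
 * clearing fails the command state is unknown and the timeout error is
 * kept.
 */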
/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag_out.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}
static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	down_read(&hba->clk_scaling_lock);

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	up_read(&hba->clk_scaling_lock);
	return err;
}
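/*
 * Typical usage, as in the query helpers that follow: the caller takes
 * hba->dev_cmd.lock and then issues, for example,
 *
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
 *				  QUERY_REQ_TIMEOUT);
 *
 * The single device management slot is arbitrated through
 * ufshcd_get_dev_cmd_tag()/ufshcd_put_dev_cmd_tag() and the
 * dev_cmd.tag_wq wait queue.
 */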
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
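/*
 * Example: ufshcd_complete_dev_init() below uses this interface (via the
 * retry wrapper) to poll the fDeviceInit flag:
 *
 *	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				      QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */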
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor
 * requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
					 enum query_opcode opcode,
					 enum desc_idn idn, u8 index,
					 u8 selector,
					 u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @desc_length: pointer to variable to read the length of descriptor
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba,
	enum desc_idn desc_id,
	int desc_index,
	int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0, header,
					&header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			__func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
	return ret;
}
/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_len: mapped desc length (out)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
	enum desc_idn desc_id, int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_param(struct ufs_hba *hba,
				  enum desc_idn desc_id,
				  int desc_index,
				  u8 param_offset,
				  u8 *param_read_buf,
				  u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0,
					desc_buf, &buff_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Check whether we will not copy more data than available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
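/*
 * Note: a partial read (param_offset != 0, or a buffer smaller than the
 * full descriptor) always fetches the complete descriptor into a
 * temporary kmalloc() buffer and copies out just the requested slice;
 * only a full-descriptor read into a large enough buffer avoids the
 * bounce copy.
 */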
static inline int ufshcd_read_desc(struct ufs_hba *hba,
				   enum desc_idn desc_id,
				   int desc_index,
				   u8 *buf,
				   u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}
static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
					 u8 *buf,
					 u32 size)
{
	int err = 0;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* Read descriptor */
		err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
		if (!err)
			break;
		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	return err;
}
static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read
 * @size: size of buf
 * @ascii: if true convert from unicode to ascii characters
 *
 * Return 0 in case of success, non-zero otherwise
 */
#define ASCII_STD true
static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
				   u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba,
				QUERY_DESC_IDN_STRING, desc_index, buf, size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		char *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
					__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
				desc_len - QUERY_DESC_HDR_SIZE,
				UTF16_BIG_ENDIAN, buff_ascii, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
				size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: LUN id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *	address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *	and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *	into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
			utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
				(i * sizeof(struct utp_transfer_req_desc));
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
				response_offset;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
		hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
				prdt_offset;
	}
}
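/*
 * Resulting layout: UTRD[i] points at command descriptor i, and each lrb
 * caches both the CPU pointers and DMA addresses of its request UPIU,
 * response UPIU and PRDT. Unless UFSHCD_QUIRK_PRDT_BYTE_GRAN is set, the
 * response UPIU and PRDT offsets/lengths are programmed in double-word
 * (4-byte) units, hence the ">> 2" shifts above.
 */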
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
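/*
 * Example: with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS set and a previous
 * DME command timestamp 300us old, the helper above sleeps for roughly
 * the remaining 700us (up to 750us with the usleep_range() slack) before
 * the next DME command is issued.
 */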
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 * state) and waits for them to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 * and device UniPro links, and hence their final completion is indicated by
 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to the normal UIC command completion Status (UCCS). This function
 * only returns after the relevant status bits indicate the completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	struct completion uic_async_done;
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	init_completion(&uic_async_done);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_host_regs(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
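/*
 * The UIC completion interrupt (UIC_COMMAND_COMPL) is deliberately
 * masked around power mode commands in the function above: their
 * completion is reported through hba->uic_async_done when the dedicated
 * power status bits (UPMS/UHES/UHXS) fire, and leaving UCCS enabled
 * could signal the command complete before the power change has
 * actually taken effect.
 */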
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
static int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);

		/*
		 * If link recovery fails then return the error so that the
		 * caller doesn't retry the hibern8 enter again.
		 */
		if (ufshcd_link_recovery(hba))
			ret = -ENOLINK;
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);
	}

	return ret;
}
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret = 0, retries;

	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
		ret = __ufshcd_uic_hibern8_enter(hba);
		if (!ret || ret == -ENOLINK)
			goto out;
	}
out:
	return ret;
}
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
		ret = ufshcd_link_recovery(hba);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}
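/*
 * The mode argument passed to ufshcd_uic_change_pwr_mode() above packs
 * the RX power mode into the upper nibble and the TX power mode into the
 * lower nibble (pwr_rx << 4 | pwr_tx), matching the layout of the
 * PA_PWRMODE attribute that the helper programs via DME_SET.
 */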
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);
	if (!ret)
		ufshcd_print_pwr_info(hba);

	return ret;
}
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = 1;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 * @can_sleep: perform sleep or just spin
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1, can_sleep);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba, true);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state moves to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_host_regs(hba);
	}
	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					       NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
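/*
 * Reviewer note (non-normative): with the defaults defined at the top of
 * this file (NOP_OUT_RETRIES == 10, NOP_OUT_TIMEOUT == 30 msecs), the NOP
 * OUT handshake above waits at most roughly 10 * 30 = 300 ms for a NOP IN
 * response, plus the submission time of each retry. Only a clean completion
 * or a timeout ends the loop early; any other error is retried.
 */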
/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read bLUQueueDepth value and activate scsi tagged command
 * queueing. For WLUN, queue depth is set to 1. For best-effort
 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
 * value that host can queue.
 */
static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
	int ret = 0;
	int retries;
	u8 lun_qdepth;
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	lun_qdepth = hba->nutrs;
	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* Read descriptor */
		ret = ufshcd_read_unit_desc_param(hba,
				ufshcd_scsi_to_upiu_lun(sdev->lun),
				UNIT_DESC_PARAM_LU_Q_DEPTH,
				&lun_qdepth,
				sizeof(lun_qdepth));
		if (!ret || ret == -ENOTSUPP)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
	}

	/* Some WLUN doesn't support unit descriptor */
	if (ret == -EOPNOTSUPP)
		lun_qdepth = 1;
	else if (!lun_qdepth)
		/* eventually, we can figure out the real queue depth */
		lun_qdepth = hba->nutrs;
	else
		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);

	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
			__func__, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}
/**
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success and the write protect status is returned in
 * the @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
static int ufshcd_get_lu_wp(struct ufs_hba *hba,
			    u8 lun,
			    u8 *b_lu_write_protect)
{
	int ret;

	if (!b_lu_write_protect)
		ret = -EINVAL;
	/*
	 * According to UFS device spec, RPMB LU can't be write
	 * protected so skip reading bLUWriteProtect parameter for
	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
	 */
	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
		ret = -ENOTSUPP;
	else
		ret = ufshcd_read_unit_desc_param(hba,
				lun,
				UNIT_DESC_PARAM_LU_WR_PROTECT,
				b_lu_write_protect,
				sizeof(*b_lu_write_protect));
	return ret;
}
/**
 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
 * status
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 *
 */
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
						    struct scsi_device *sdev)
{
	if (hba->dev_info.f_power_on_wp_en &&
	    !hba->dev_info.is_lu_power_on_wp) {
		u8 b_lu_write_protect;

		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
				      &b_lu_write_protect) &&
		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
			hba->dev_info.is_lu_power_on_wp = true;
	}
}
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	ufshcd_set_queue_depth(sdev);

	ufshcd_get_lu_power_on_wp_status(hba, sdev);

	return 0;
}
/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct ufs_hba *hba = shost_priv(sdev->host);

	if (depth > hba->nutrs)
		depth = hba->nutrs;
	return scsi_change_queue_depth(sdev, depth);
}
/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);

	return 0;
}
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->sdev_ufs_device = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->output_param1);
		task_result = task_result & MASK_TM_SERVICE_RESP;
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ocs_value;
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
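/*
 * Reviewer note (non-normative): the result word assembled above uses the
 * standard SCSI midlayer packing: host byte in bits 23:16, message byte in
 * bits 15:8, SCSI status in bits 7:0. E.g. the default branch's
 * DID_ERROR << 16 places 0x07 in the host byte, which is what
 * host_byte(result) in ufshcd_transfer_rsp_status() later compares against
 * DID_OK.
 */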
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
				"OCS error from controller = %x for tag %d\n",
				ocs, lrbp->task_tag);
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if (host_byte(result) != DID_OK)
		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
	return result;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
		complete(hba->uic_async_done);
}
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	int result;
	int index;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			ufshcd_add_command_trace(hba, index, "complete");
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			scsi_dma_unmap(cmd);
			cmd->result = result;
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			clear_bit_unlock(index, &hba->lrb_in_use);
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
			__ufshcd_release(hba);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
			if (hba->dev_cmd.complete) {
				ufshcd_add_command_trace(hba, index,
						"dev_complete");
				complete(hba->dev_cmd.complete);
			}
		}
		if (ufshcd_is_clkscaling_supported(hba))
			hba->clk_scaling.active_reqs--;
	}

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	ufshcd_clk_scaling_update_busy(hba);

	/* we might have free'd some tags above */
	wake_up(&hba->dev_cmd.tag_wq);
}
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	unsigned long completed_reqs;
	u32 tr_doorbell;

	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_reset_intr_aggr(hba);

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	__ufshcd_transfer_req_compl(hba, completed_reqs);
}
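/*
 * Reviewer note (non-normative): the XOR above works because a request's
 * doorbell bit is set while it is outstanding and cleared by the controller
 * on completion. E.g. with outstanding_reqs == 0b1011 and a doorbell
 * readback of 0b0010, tr_doorbell ^ outstanding_reqs == 0b1001: tags 0 and 3
 * have completed while tag 1 is still in flight.
 */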
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}
/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}
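/*
 * Reviewer note (non-normative): wExceptionEventControl is a 16-bit
 * attribute, hence the "val &= 0xFFFF" masking in both helpers above.
 * hba->ee_ctrl_mask mirrors the device-side value and is only updated once
 * the query succeeds, so a failed write leaves host and device state
 * consistent.
 */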
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
/**
 * ufshcd_disable_auto_bkops - block device from doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
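/*
 * Reviewer note (non-normative): bBackgroundOpStatus is an escalating scale
 * (non-critical, performance impact, critical), so comparing the current
 * value against the requested threshold with ">=" means "enable auto-BKOPS
 * once the device reports at least this level of urgency".
 */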
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0, 1 if bkops is not enabled
 * and negative error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;

	if (status & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

out:
	pm_runtime_put_sync(hba->dev);
	return;
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
}
/**
 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 *				to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err) {
			err_handling = false;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;
	bool needs_reset = false;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		goto out;

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret)
			goto skip_err_handling;
	}
	if ((hba->saved_err & INT_FATAL_ERRORS) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
		needs_reset = true;

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will automatically get
	 * cleared after link startup.
	 */
	if (needs_reset)
		goto skip_pending_xfer_clear;

	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, tag)) {
			err_xfer = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			err_tm = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

lock_skip_pending_xfer_clear:
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	if (err_xfer || err_tm)
		needs_reset = true;

skip_pending_xfer_clear:
	/* Fatal errors need reset */
	if (needs_reset) {
		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;

		/*
		 * ufshcd_reset_and_restore() does the link reinitialization
		 * which will need at least one empty doorbell slot to send the
		 * device management commands (NOP and query commands).
		 * If there is no slot empty at this moment then free up last
		 * slot forcefully.
		 */
		if (hba->outstanding_reqs == max_doorbells)
			__ufshcd_transfer_req_compl(hba,
						    (1UL << (hba->nutrs - 1)));

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}

skip_err_handling:
	if (!needs_reset) {
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}

	ufshcd_clear_eh_in_progress(hba);

out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	scsi_unblock_requests(hba->host);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}
static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
		u32 reg)
{
	reg_hist->reg[reg_hist->pos] = reg;
	reg_hist->tstamp[reg_hist->pos] = ktime_get();
	reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}
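/*
 * Reviewer note (non-normative): this is a simple ring buffer. With
 * UIC_ERR_REG_HIST_LENGTH slots, the (pos + 1) % LENGTH update means the
 * oldest sample is overwritten once the history is full; e.g. with a length
 * of 8, the 9th error lands back in slot 0.
 */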
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PHY layer lane error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	/* Ignore LINERESET indication, as this is not an error */
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
			(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg)
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);

	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
	else if (hba->dev_quirks &
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
			hba->uic_error |=
				UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
			hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
	}

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg) {
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg) {
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg) {
		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;

			/* dump controller state before resetting */
			if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
				bool pr_prdt = !!(hba->saved_err &
						SYSTEM_BUS_FATAL_ERROR);

				dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);

				ufshcd_print_host_regs(hba);
				ufshcd_print_pwr_info(hba);
				ufshcd_print_tmrs(hba, hba->outstanding_tasks);
				ufshcd_print_trs(hba, hba->outstanding_reqs,
							pr_prdt);
			}
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	enabled_intr_status =
		intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (intr_status)
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

	if (enabled_intr_status) {
		ufshcd_sl_intr(hba, enabled_intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}
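/*
 * Reviewer note (non-normative, assuming the controller latches IS bits
 * until they are acknowledged): the handler above acknowledges the status
 * register before servicing the sources, and only handles bits that are both
 * pending and enabled. An event that fires while ufshcd_sl_intr() runs is
 * then not lost: it sets IS again and re-raises the IRQ instead of being
 * wiped out by this pass's write.
 */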
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000, true);
out:
	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
					      lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);

	/* Make sure descriptors are ready before ringing the task doorbell */
	wmb();

	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	ufshcd_release(hba);
	return err;
}
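/*
 * Reviewer note (non-normative): task_tag = hba->nutrs + free_slot keeps the
 * UPIU task tags of TM requests disjoint from transfer-request tags, which
 * run from 0 to nutrs - 1. E.g. with nutrs == 32 (UFSHCD_CAN_QUEUE), TM slot
 * 0 is tagged 32 on the wire, so its response can never be confused with
 * that of an ordinary SCSI command.
 */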
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                    scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);

out:
	hba->req_abort_count = 0;
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;
	lrbp = &hba->lrb[tag];
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps we skip to the last error
	 * handling stage: reset and restore.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
		return ufshcd_eh_host_reset_handler(cmd);

	ufshcd_hold(hba, false);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto out;
	}

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	/* Print Transfer Request of aborted task */
	dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(hba->lrb[tag].cmd);
	if (!hba->req_abort_count) {
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_trs(hba, 1 << tag, true);
	} else {
		ufshcd_print_trs(hba, 1 << tag, false);
	}
	hba->req_abort_count++;

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		err = -EIO;
		goto out;
	}

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);
		goto out;
	}

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_outstanding_req_clear(hba, tag);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
	}

	/*
	 * This ufshcd_release() corresponds to the original scsi cmd that got
	 * aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, true);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	err = ufshcd_probe_hba(hba);

	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
		err = -EIO;
out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);

	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	do {
		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
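/*
 * Reviewer note (non-normative): with MAX_HOST_RESET_RETRIES == 5 (defined
 * at the top of this file), the do/while above attempts a full host reset
 * and re-probe up to five times before giving up and reporting the last
 * error.
 */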
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	ufshcd_hold(hba, false);
	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpup((__be16 *)&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
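/*
 * Reviewer note (non-normative): each 16-bit descriptor entry packs a unit
 * code and a value, and everything is normalized to microamps before the
 * comparison. E.g. a value of 300 with the milliamp unit becomes
 * 300 * 1000 = 300000 uA. The scan walks from the highest ICC level down and
 * stops at the first level the regulator can actually supply.
 */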
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
							u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
						!hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
							__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
{
	int ret = 0;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* write attribute */
		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
		if (!ret)
			break;

		dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
	}

	return ret;
}
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = hba->desc_size.pwr_desc;
	u8 desc_buf[hba->desc_size.pwr_desc];

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor.len = %d ret = %d",
			__func__, buff_len, ret);
		return;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_set_icc_levels_attr(hba,
				 hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when its sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_rpmb;
	struct scsi_device *sdev_boot;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}
	scsi_device_put(hba->sdev_ufs_device);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		ret = PTR_ERR(sdev_boot);
		goto remove_sdev_ufs_device;
	}
	scsi_device_put(sdev_boot);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_sdev_boot;
	}
	scsi_device_put(sdev_rpmb);
	goto out;

remove_sdev_boot:
	scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	u8 model_index;
	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
	u8 desc_buf[hba->desc_size.dev_desc];

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
				      QUERY_DESC_MAX_SIZE, ASCII_STD);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	return err;
}
static void ufs_fixup_device_setup(struct ufs_hba *hba,
				   struct ufs_dev_desc *dev_desc)
{
	struct ufs_dev_fix *f;

	for (f = ufs_fixups; f->quirk; f++) {
		if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
		    (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				  &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us > peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
				      gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
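/*
 * Reviewer note (non-normative): PA_TACTIVATE is expressed in units of
 * PA_GRANULARITY, and gran_to_us_table[] maps granularity codes 1..6 to
 * 1/4/8/16/32/100 us per unit. E.g. a host value of 2 at granularity 4
 * (16 us/unit) is 32 us; if the device side works out shorter, its
 * PA_TACTIVATE is bumped (with a +1 round-up) so it never ends up below the
 * host's.
 */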
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);

	ufshcd_vops_apply_dev_quirks(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);

	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);

	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
	memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);

	hba->req_abort_count = 0;
}
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
		&hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
		&hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
		&hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
		&hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
		&hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
		&hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
}
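/*
 * The six calls above share one pattern: query the actual descriptor length
 * from the device, and fall back to the QUERY_DESC_*_DEF_SIZE specification
 * default on any failure, so later descriptor reads always have a sane
 * buffer size. A hypothetical helper capturing the pattern (editorial
 * sketch only, not part of the driver) would look like:
 *
 *	static void ufshcd_init_one_desc_size(struct ufs_hba *hba,
 *					      int desc_idn, int *size,
 *					      int def_size)
 *	{
 *		if (ufshcd_read_desc_length(hba, desc_idn, 0, size))
 *			*size = def_size;
 *	}
 */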
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	/* set the default level for urgent bkops */
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufs_fixup_device_setup(hba, &card);
	ufshcd_tune_unipro_params(hba);

	ret = ufshcd_set_vccq_rail_unused(hba,
		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
	if (ret)
		goto out;

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);
	hba->wlun_dev_clr_ua = true;

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			goto out;
		}
	}

	/* set the state as operational after switching to desired gear */
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
	 */
	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		bool flag;

		/* clear any previous UFS device information */
		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
		if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
			hba->dev_info.f_power_on_wp_en = flag;

		if (!hba->is_init_prefetch)
			ufshcd_init_icc_levels(hba);

		/* Add required well known logical units to scsi mid layer */
		if (ufshcd_scsi_add_wlus(hba))
			goto out;

		/* Initialize devfreq after UFS device is detected */
		if (ufshcd_is_clkscaling_supported(hba)) {
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));
			hba->clk_scaling.saved_pwr_info.is_valid = true;
			if (!hba->devfreq) {
				hba->devfreq = devm_devfreq_add_device(hba->dev,
							&ufs_devfreq_profile,
							"simple_ondemand",
							NULL);
				if (IS_ERR(hba->devfreq)) {
					ret = PTR_ERR(hba->devfreq);
					dev_err(hba->dev, "Unable to register with devfreq %d\n",
						ret);
					goto out;
				}
			}
			hba->clk_scaling.is_allowed = true;
		}

		scsi_scan_host(hba->host);
		pm_runtime_put_sync(hba->dev);
	}

	if (!hba->is_init_prefetch)
		hba->is_init_prefetch = true;

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_hba_exit(hba);
	}

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;

	ufshcd_probe_hba(hba);
}
static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int index;
	bool found = false;

	if (!scmd || !scmd->device || !scmd->device->host)
		return BLK_EH_NOT_HANDLED;

	host = scmd->device->host;
	hba = shost_priv(host);
	if (!hba)
		return BLK_EH_NOT_HANDLED;

	spin_lock_irqsave(host->host_lock, flags);

	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[index].cmd == scmd) {
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(host->host_lock, flags);

	/*
	 * Bypass SCSI error handling and reset the block layer timer if this
	 * SCSI command was not actually dispatched to UFS driver, otherwise
	 * let SCSI layer handle the error as usual.
	 */
	return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg,
					       UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;
	else if (vreg->unused)
		return 0;
	else
		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;
	int min_uV, uA_load;

	BUG_ON(!vreg);

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
					__func__, name, ret);
			goto out;
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (vreg->enabled || vreg->unused)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;
	else if (!vreg->enabled || vreg->unused)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
	if (ret)
		goto out;

out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);

	return 0;
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (info)
		return ufshcd_get_vreg(hba->dev, info->vdd_hba);

	return 0;
}
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
{
	int ret = 0;
	struct ufs_vreg_info *info = &hba->vreg_info;

	if (!info)
		goto out;
	else if (!info->vccq)
		goto out;

	if (unused) {
		/* shut off the rail here */
		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
		/*
		 * Mark this rail as no longer used, so it doesn't get enabled
		 * later by mistake
		 */
		if (!ret)
			info->vccq->unused = true;
	} else {
		/*
		 * rail should have been already enabled hence just make sure
		 * that unused flag is cleared.
		 */
		info->vccq->unused = false;
	}
out:
	return ret;
}
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
					bool skip_ref_clk)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (!head || list_empty(head))
		goto out;

	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
	if (ret)
		return ret;

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
			hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
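/*
 * Editorial note on the XOR above: clk_state_changed records whether the
 * last clock inspected actually changed state. Truth table for
 * on ^ clki->enabled:
 *
 *	on	enabled		changed?
 *	true	false		true  (clock gets enabled)
 *	true	true		false (already on, nothing to do)
 *	false	true		true  (clock gets disabled)
 *	false	false		false (already off)
 *
 * Only when a real transition happened is the clk_gating profile trace
 * emitted with the elapsed time.
 */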
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		goto out_exit;

	goto out;

out_exit:
	ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}
static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_suspend_clkscaling(hba);
		if (ufshcd_is_clkscaling_supported(hba)) {
			if (hba->devfreq)
				ufshcd_suspend_clkscaling(hba);
			destroy_workqueue(hba->clk_scaling.workq);
		}
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				UFSHCD_REQ_SENSE_SIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
			UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
			msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				     enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
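/*
 * Editorial illustration of the CDB encoding above: the START STOP UNIT
 * power condition lives in bits 7:4 of byte 4, hence cmd[4] = pwr_mode << 4.
 * Assuming the enum ufs_dev_pwr_mode values used elsewhere in this driver
 * (Active = 1, Sleep = 2, PowerDown = 3), requesting UFS_SLEEP_PWR_MODE
 * produces cmd[4] = 0x20, i.e. power condition 2h, which the UFS device
 * maps to its Sleep power mode.
 */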
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		   (!check_for_bkops || (check_for_bkops &&
		    !hba->auto_bkops_enabled))) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. Another way
		 * to put the link in low power mode is to send the DME end
		 * point reset command to the device and then send the DME
		 * reset command to local UniPro. But putting the link in
		 * Hibern8 is much faster.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret)
			goto out;
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba, true);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If UFS device is in UFS_Sleep state, turn off the VCC rail to save
	 * some power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
	 * in low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}
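/*
 * Summary of the LPM decisions above (editorial, derived from the code):
 *
 *	device state	link state	action
 *	------------	----------	------
 *	PowerDown	OFF		all rails off (unless WP needs power)
 *	Sleep/PwrDown	any		VCC off
 *	Sleep/PwrDown	Hibern8/OFF	VCC off, VCCQ & VCCQ2 in LPM
 *	Active		any		rails left untouched
 *
 * ufshcd_vreg_set_hpm() below performs the mirror-image transitions on
 * resume.
 */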
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link is powered off.
 *
 * NOTE: UFS device & link must be active before we enter in this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (hba->clk_scaling.is_allowed) {
		cancel_work_sync(&hba->clk_scaling.suspend_work);
		cancel_work_sync(&hba->clk_scaling.resume_work);
		ufshcd_suspend_clkscaling(hba);
	}

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_gating;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_gating;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	       !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op);
	if (ret)
		goto set_link_active;

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

set_link_active:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	hba->clk_gating.is_suspended = false;

	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	if (hba->clk_scaling.is_allowed)
		ufshcd_suspend_clkscaling(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}
/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba || !hba->is_powered)
		return 0;

	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
	     hba->curr_dev_pwr_mode) &&
	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
	     hba->uic_link_state))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seems to be different than what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes longer time, we might have to optimize
		 * it in future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		goto out;
	else
		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		goto out;
	else
		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
out:
	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. Following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So following would be the possible power state after this function return
 * successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		goto out;
	else
		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
out:
	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count,
					   bool rpm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	if (value >= UFS_PM_LVL_MAX)
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (rpm)
		hba->rpm_lvl = value;
	else
		hba->spm_lvl = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int curr_len;
	u8 lvl;

	curr_len = snprintf(buf, PAGE_SIZE,
			    "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
			    hba->rpm_lvl,
			    ufschd_ufs_dev_pwr_mode_to_string(
				ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
			    ufschd_uic_link_state_to_string(
				ufs_pm_lvl_states[hba->rpm_lvl].link_state));

	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
			     "\nAll available Runtime PM levels info:\n");
	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
				     "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
				     lvl,
				     ufschd_ufs_dev_pwr_mode_to_string(
					ufs_pm_lvl_states[lvl].dev_state),
				     ufschd_uic_link_state_to_string(
					ufs_pm_lvl_states[lvl].link_state));

	return curr_len;
}
static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
}
static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
{
	hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
	hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
	sysfs_attr_init(&hba->rpm_lvl_attr.attr);
	hba->rpm_lvl_attr.attr.name = "rpm_lvl";
	hba->rpm_lvl_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
		dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
}
static ssize_t ufshcd_spm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int curr_len;
	u8 lvl;

	curr_len = snprintf(buf, PAGE_SIZE,
			    "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
			    hba->spm_lvl,
			    ufschd_ufs_dev_pwr_mode_to_string(
				ufs_pm_lvl_states[hba->spm_lvl].dev_state),
			    ufschd_uic_link_state_to_string(
				ufs_pm_lvl_states[hba->spm_lvl].link_state));

	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
			     "\nAll available System PM levels info:\n");
	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
				     "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
				     lvl,
				     ufschd_ufs_dev_pwr_mode_to_string(
					ufs_pm_lvl_states[lvl].dev_state),
				     ufschd_uic_link_state_to_string(
					ufs_pm_lvl_states[lvl].link_state));

	return curr_len;
}
static ssize_t ufshcd_spm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
}
static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
{
	hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
	hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
	sysfs_attr_init(&hba->spm_lvl_attr.attr);
	hba->spm_lvl_attr.attr.name = "spm_lvl";
	hba->spm_lvl_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->spm_lvl_attr))
		dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
}
static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
{
	ufshcd_add_rpm_lvl_sysfs_nodes(hba);
	ufshcd_add_spm_lvl_sysfs_nodes(hba);
}
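/*
 * Usage illustration (editorial): once the rpm_lvl and spm_lvl attributes
 * above are registered, the PM level can be changed at runtime from user
 * space, e.g.
 *
 *	# cat /sys/devices/.../rpm_lvl        (exact path is platform specific)
 *	# echo 3 > /sys/devices/.../rpm_lvl
 *
 * Values are validated against UFS_PM_LVL_MAX and applied under host_lock,
 * so a concurrent suspend observes either the old or the new level, never
 * a torn value.
 */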
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba, true);

	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_supported(hba))
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
			"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
			"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
	    (hba->ufs_version != UFSHCI_VERSION_11) &&
	    (hba->ufs_version != UFSHCI_VERSION_20) &&
	    (hba->ufs_version != UFSHCI_VERSION_21))
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->ufs_version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	init_rwsem(&hba->clk_scaling_lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_supported(hba)) {
		char wq_name[sizeof("ufs_clkscaling_00")];

		INIT_WORK(&hba->clk_scaling.suspend_work,
			  ufshcd_clk_scaling_suspend_work);
		INIT_WORK(&hba->clk_scaling.resume_work,
			  ufshcd_clk_scaling_resume_work);

		snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
			 host->host_no);
		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

		ufshcd_clkscaling_init_sysfs(hba);
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufshcd_add_sysfs_nodes(hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
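/*
 * Editorial sketch of how a bus glue driver is expected to use the exported
 * entry points above (hypothetical platform driver, illustration only --
 * see the real adapters such as ufshcd-pltfrm and ufshcd-pci for the
 * authoritative pattern):
 *
 *	static int my_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba;
 *		void __iomem *mmio;
 *		struct resource *res;
 *		int irq, err;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		mmio = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(mmio))
 *			return PTR_ERR(mmio);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		err = ufshcd_alloc_host(&pdev->dev, &hba);
 *		if (err)
 *			return err;
 *
 *		err = ufshcd_init(hba, mmio, irq);
 *		if (err)
 *			ufshcd_dealloc_host(hba);
 *		return err;
 *	}
 */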
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);