/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/bitfield.h>

#include "ufs_quirks.h"
#include "ufs-sysfs.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
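/*
 * Note: INT_AGGR_DEF_TO is expressed in 40us units, so the default of
 * 0x02 corresponds to an aggregation timeout of 80us.
 */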
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})
#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
	size_t __len = (len);                                            \
	print_hex_dump(KERN_ERR, prefix_str,                             \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);                        \
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
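/*
 * Illustrative use of the dump helper above (this is exactly what
 * ufshcd_print_host_regs() below does): dump the whole UFSHCI register
 * space with an identifying prefix.
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */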
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};
#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
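/*
 * Illustrative lookup, based on the ufs_pm_lvl_states[] table above: a
 * desired state of (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) matches
 * the fourth table entry, so ufs_get_desired_pm_lvl_for_dev_link_state()
 * returns the ufs_pm_level corresponding to that index.
 */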
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				  hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		else
			hba->is_irq_enabled = true;
	}

	return ret;
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			&descp->input_param1);
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace UPIU W/O tracing command */
		if (cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = cmd->cmnd[0];
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (cmd->request && cmd->request->bio)
				lba = cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_err_hist(struct ufs_hba *hba,
				  struct ufs_err_reg_hist *err_hist,
				  char *err_name)
{
	int i;
	bool found = false;

	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s errors\n", err_name);
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
			      "auto_hibern8_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
			      "link_startup_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
			      "suspend_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");

	ufshcd_print_clk_freqs(hba);

	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
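/*
 * Illustrative use of the polling helper above (the values shown are
 * hypothetical): wait up to 1000 ms, polling every 100 us, for a transfer
 * request doorbell bit to clear.
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 100, 1000, true);
 */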
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
	}

	return intr_mask;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos),
				REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
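/*
 * Note: in the default (non-quirked) path above, every bit *except* the
 * slot being cleared is written as '1' -- UTRLCLR clears the slots whose
 * bits are written as '0' -- while controllers flagged with
 * UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR expect the inverse encoding.
 */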
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns value of command argument3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/**
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}
/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host controller
 * that it can process the requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
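/*
 * Note: the vendor hook ufshcd_vops_clk_scale_notify() brackets the rate
 * change (PRE_CHANGE/POST_CHANGE) so that platform glue gets a chance to
 * react both before and after the core clocks move between min_freq and
 * max_freq.
 */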
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}
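/*
 * Note on the ordering in ufshcd_devfreq_scale() above: the link gear is
 * dropped before the clocks when scaling down, and raised only after the
 * clocks when scaling up, so the gear never runs ahead of the clock rate.
 */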
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	pm_runtime_get_noresume(hba->dev);
	if (!pm_runtime_active(hba->dev)) {
		pm_runtime_put_noidle(hba->dev);
		ret = -EAGAIN;
		goto out;
	}
	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);
	pm_runtime_put(hba->dev);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
	int *busy = priv;

	WARN_ON_ONCE(reserved);
	(*busy)++;
	return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
	struct request_queue *q = hba->cmd_queue;
	int busy = 0;

	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
	return busy;
}
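/*
 * Note: ufshcd_any_tag_in_use() asks the blk-mq tag set rather than the
 * driver's own outstanding_reqs bitmap, so it may also observe requests
 * that the block layer has started but the driver has not yet issued to
 * the hardware.
 */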
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	devfreq = devfreq_add_device(hba->dev,
			&ufs_devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}
static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			if (async) {
				rc = -EAGAIN;
				hba->clk_gating.active_reqs--;
				break;
			}
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				goto out;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		/* fallthrough */
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		if (queue_work(hba->clk_gating.clk_gating_workq,
			       &hba->clk_gating.ungate_work))
			ufshcd_scsi_block_requests(hba);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		/* fallthrough */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
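/*
 * Typical pairing, as used throughout this file (for example in
 * ufshcd_send_uic_cmd() below):
 *
 *	ufshcd_hold(hba, false);
 *	... issue work that needs the clocks ungated ...
 *	ufshcd_release(hba);
 */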
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state == REQ_CLKS_ON)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	ufshcd_clkscaling_init_sysfs(hba);
}

static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
							   WQ_MEM_RECLAIM);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
		return;

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = jiffies;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	hba->lrb[task_tag].issue_time_stamp = ktime_get();
	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
	ufshcd_add_command_trace(hba, task_tag, "send");
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
}
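/*
 * Note: the issue path above stamps the request, marks it in
 * outstanding_reqs and only then rings the doorbell; the completion
 * path elsewhere in this driver clears the same outstanding_reqs bit.
 */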
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 * @completion: initialize the completion only if this is set to true
 *
 * Identical to ufshcd_send_uic_cmd() except mutex. Must be called
 * with mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
		      bool completion)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	if (completion)
		init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
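/*
 * Illustrative sketch (not part of the original driver): composing a raw
 * DME_GET through ufshcd_send_uic_cmd(). The helper name is hypothetical;
 * the driver's real accessor for this is ufshcd_dme_get_attr() further
 * below, which adds retries and quirk handling on top of the same calls.
 */
static int __maybe_unused ufshcd_example_dme_get(struct ufs_hba *hba,
						 u32 attr_sel, u32 *mib_val)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel; /* e.g. UIC_ARG_MIB(PA_ACTIVETXDATALANES) */

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && mib_val)
		*mib_val = uic_cmd.argument3; /* GET result comes back in ARG3 */
	return ret;
}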
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16)(sg_segments *
					sizeof(struct ufshcd_sg_entry)));
		else
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size  =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
			prd_table[i].reserved = 0;
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
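/*
 * Worked example (illustrative, not from the original source): a single
 * 4 KiB segment at DMA address 0x1_2345_6000 is encoded above as
 * size = 0xFFF (byte count minus one), base_addr = 0x23456000 and
 * upper_addr = 0x1 -- the 64-bit address is split across the two
 * little-endian PRDT fields.
 */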
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
			u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}
/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		lrbp->command_type = UTP_CMD_TYPE_SCSI;
	else
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	if (likely(lrbp->cmd)) {
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
						lrbp->cmd->sc_data_direction);
		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
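/*
 * Worked example (illustrative, not from the original source): the UPIU
 * RPMB well-known LU id 0xC4 maps to (0xC4 & ~0x80) | 0xC100 = 0xC144,
 * which is where the RPMB W-LUN shows up on the SCSI side.
 */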
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	if (!down_read_trylock(&hba->clk_scaling_lock))
		return SCSI_MLQUEUE_HOST_BUSY;

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED:
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}

	/* if error handling is in progress, don't issue commands */
	if (ufshcd_eh_in_progress(hba)) {
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	hba->req_abort_count = 0;

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = UFS_SENSE_SIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		lrbp->cmd = NULL;
		ufshcd_release(hba);
		goto out;
	}
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	up_read(&hba->clk_scaling_lock);
	return err;
}
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_comp_devman_upiu(hba, lrbp);
}
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000, true);

	return err;
}
static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
			__func__, lrbp->task_tag);
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
		 * in case of an error, after clearing the doorbell,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
	}

	return err;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct request_queue *q = hba->cmd_queue;
	struct request *req;
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	down_read(&hba->clk_scaling_lock);

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}
	tag = req->tag;
	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
	/* Set the timeout such that the SCSI error handler is not activated. */
	req->timeout = msecs_to_jiffies(2 * timeout);
	blk_mq_start_request(req);

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, tag, "query_send");
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

	ufshcd_add_query_upiu_trace(hba, tag,
			err ? "query_complete_err" : "query_complete");

out_put_tag:
	blk_put_request(req);
out_unlock:
	up_read(&hba->clk_scaling_lock);
	return err;
}
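/*
 * Usage sketch (illustrative): callers must own hba->dev_cmd.lock around
 * ufshcd_exec_dev_cmd(), the way the NOP verification path does it:
 *
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 */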
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}
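/*
 * Illustrative sketch (not in the original file): reading a boolean device
 * flag, here fDeviceInit, through ufshcd_query_flag(). The helper name is
 * hypothetical; the real probe path goes through ufshcd_query_flag_retry()
 * above for resilience.
 */
static int __maybe_unused ufshcd_example_read_fdeviceinit(struct ufs_hba *hba,
							  bool *flag_res)
{
	/* READ_FLAG requires a result buffer; dummy reads are rejected */
	return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				 QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}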
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
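/*
 * Illustrative sketch (not in the original file): reading the active ICC
 * level attribute via ufshcd_query_attr(). The helper name is hypothetical;
 * index and selector are 0 for this device-wide attribute.
 */
static int __maybe_unused ufshcd_example_read_icc_level(struct ufs_hba *hba,
							u32 *icc_level)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				 icc_level);
}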
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
					selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @desc_length: pointer to variable to read the length of descriptor
 *
 * Return 0 in case of success, non-zero otherwise
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba,
	enum desc_idn desc_id,
	int desc_index,
	int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0, header,
					&header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
	return ret;
}
/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_len: mapped desc length (out)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
	enum desc_idn desc_id, int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					desc_id, desc_index, 0,
					desc_buf, &buff_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Check whether we will not copy more data than available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
static inline int ufshcd_read_desc(struct ufs_hba *hba,
				   enum desc_idn desc_id,
				   int desc_index,
				   void *buf,
				   u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
					 u8 *buf,
					 u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
			       desc_index, uc_str,
			       QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		str[ret++] = '\0';
	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}
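/*
 * Usage sketch (illustrative): the probe path reads the product name with
 * something like
 *
 *	u8 *model;
 *	int ret = ufshcd_read_string_desc(hba, model_index, &model, true);
 *
 * where model_index (a hypothetical variable here) comes from the device
 * descriptor; on success the caller owns the returned buffer and must
 * kfree() it when done.
 */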
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return 0 in case of success, non-zero otherwise
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
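/*
 * Illustrative sketch (not in the original file): checking whether a LU is
 * enabled via one byte of its unit descriptor. The helper name is
 * hypothetical.
 */
static int __maybe_unused ufshcd_example_lu_enabled(struct ufs_hba *hba,
						    int lun, u8 *enabled)
{
	return ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_ENABLE,
					   enabled, sizeof(*enabled));
}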
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
			utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
				(i * sizeof(struct utp_transfer_req_desc));
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
				response_offset;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
		hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
				prdt_offset;
	}
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}
/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_RESET;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}
/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_ENABLE;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			return; /* no more delay required */
	}

	/* allow sleep for extra 50us if needed */
	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
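/*
 * Note (illustrative, assuming the wrapper macros in ufshcd.h): the bulk of
 * this file writes attributes through convenience macros such as
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
 *
 * which expands to ufshcd_dme_set_attr(hba, ..., ATTR_SET_NOR, ...,
 * DME_LOCAL); ufshcd_dme_peer_set() similarly selects DME_PEER.
 */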
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
		    orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take its effect on both host
 * and device UniPro link, and hence their final completion is indicated by
 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
 * addition to normal UIC command completion Status (UCCS). This function only
 * returns after the relevant status bits indicate the completion.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	struct completion uic_async_done;
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	init_completion(&uic_async_done);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		wmb();
		reenable_intr = true;
	}
	ret = __ufshcd_send_uic_cmd(hba, cmd, false);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_host_regs(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
						__func__, ret);
			goto out;
		}
	}

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	ufshcd_hold(hba, false);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
static int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret;
	struct uic_command uic_cmd = {0};
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		int err;

		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);

		/*
		 * If link recovery fails then return error code returned from
		 * ufshcd_link_recovery().
		 * If link recovery succeeds then return -EAGAIN to attempt
		 * hibern8 enter retry again.
		 */
		err = ufshcd_link_recovery(hba);
		if (err) {
			dev_err(hba->dev, "%s: link recovery failed", __func__);
			ret = err;
		} else {
			ret = -EAGAIN;
		}
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
								POST_CHANGE);
	}

	return ret;
}
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	int ret = 0, retries;

	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
		ret = __ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			goto out;
	}
out:
	return ret;
}
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
		ret = ufshcd_link_recovery(hba);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
								POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
	unsigned long flags;

	if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			&pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			&pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
				__func__,
				pwr_info->lane_rx,
				pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			&pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				&pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
			pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
			pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
						pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
			| pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
								pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
			sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}
/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);
	if (!ret)
		ufshcd_print_pwr_info(hba);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
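/*
 * Usage sketch (illustrative): after link startup the probe path switches
 * to the best negotiated settings with something like
 *
 *	ufshcd_get_max_pwr_mode(hba);
 *	ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */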
/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = 1;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
		QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * Make sure base address and interrupt setup are updated before
	 * enabling the run/stop registers below.
	 */
	wmb();

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 * @can_sleep: perform sleep or just spin
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
					10, 1, can_sleep);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba, true);

	/* UniPro link is disabled at this point */
	ufshcd_set_link_off(hba);

	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	usleep_range(1000, 1100);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		usleep_range(5000, 5100);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
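/*
 * Illustrative HCE handshake timeline for the function above:
 *
 *	ufshcd_hba_start()	-> host sets HCE = 1
 *	controller resetting	-> HCE reads back as 0
 *	init sequence done	-> HCE reads back as 1
 *
 * The usleep_range(1000, 1100) skips past the first window in which the
 * stale HCE = 1, written by the host, could still be read back before
 * the controller has dropped it to 0.
 */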
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (!ret) {
			ret = ufshcd_dme_enable(hba);
			if (!ret)
				ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
			if (ret)
				dev_err(hba->dev,
					"Host controller enable failed with non-hce\n");
		}
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
				   u32 reg)
{
	reg_hist->reg[reg_hist->pos] = reg;
	reg_hist->tstamp[reg_hist->pos] = ktime_get();
	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
}
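/*
 * Usage sketch: callers record the raw error register at the current
 * ring position, e.g.
 *
 *	ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
 *
 * With UFS_ERR_REG_HIST_LENGTH == 8 (the value at the time of writing),
 * pos cycles 0, 1, ..., 7, 0, ... so each history always holds the last
 * eight (reg, timestamp) pairs for that error class.
 */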
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If UFS device isn't active then we will have to issue link startup
	 * 2 times to make sure the device state move to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba)) {
			ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_host_regs(hba);
	}
	return ret;
}
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					       NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}
/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read bLUQueueDepth value and activate scsi tagged command
 * queueing. For WLUN, queue depth is set to 1. For best-effort
 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
 * value that host can queue.
 */
static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
	int ret = 0;
	u8 lun_qdepth;
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	lun_qdepth = hba->nutrs;
	ret = ufshcd_read_unit_desc_param(hba,
					  ufshcd_scsi_to_upiu_lun(sdev->lun),
					  UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth,
					  sizeof(lun_qdepth));

	/* Some WLUN doesn't support unit descriptor */
	if (ret == -EOPNOTSUPP)
		lun_qdepth = 1;
	else if (!lun_qdepth)
		/* eventually, we can figure out the real queue depth */
		lun_qdepth = hba->nutrs;
	else
		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);

	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
			__func__, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}
/**
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success and the write protect status is returned in
 * the @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
static int ufshcd_get_lu_wp(struct ufs_hba *hba,
			    u8 lun,
			    u8 *b_lu_write_protect)
{
	int ret;

	if (!b_lu_write_protect)
		ret = -EINVAL;
	/*
	 * According to UFS device spec, RPMB LU can't be write
	 * protected so skip reading bLUWriteProtect parameter for
	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
	 */
	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
		ret = -ENOTSUPP;
	else
		ret = ufshcd_read_unit_desc_param(hba,
						  lun,
						  UNIT_DESC_PARAM_LU_WR_PROTECT,
						  b_lu_write_protect,
						  sizeof(*b_lu_write_protect));
	return ret;
}
/**
 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect status
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
						    struct scsi_device *sdev)
{
	if (hba->dev_info.f_power_on_wp_en &&
	    !hba->dev_info.is_lu_power_on_wp) {
		u8 b_lu_write_protect;

		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
				      &b_lu_write_protect) &&
		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
			hba->dev_info.is_lu_power_on_wp = true;
	}
}
/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns success
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_set_queue_depth(sdev);

	ufshcd_get_lu_power_on_wp_status(hba, sdev);

	return 0;
}
/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct ufs_hba *hba = shost_priv(sdev->host);

	if (depth > hba->nutrs)
		depth = hba->nutrs;
	return scsi_change_queue_depth(sdev, depth);
}
/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);

	return 0;
}
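/*
 * Worked example for the pad mask above: PRDT_DATA_BYTE_COUNT_PAD is 4,
 * so blk_queue_update_dma_pad() receives a mask of 3 (0b11) and the
 * block layer pads transfer lengths until (len & 3) == 0, matching the
 * PRDT requirement that every data byte count be 4-byte aligned.
 */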
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->sdev_ufs_device = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x for tag %d\n",
			ocs, lrbp->task_tag);
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
	return result;
}
/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
		retval = IRQ_HANDLED;
	}

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	return retval;
}
/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	int result;
	int index;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			ufshcd_add_command_trace(hba, index, "complete");
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			scsi_dma_unmap(cmd);
			cmd->result = result;
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
			__ufshcd_release(hba);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
			if (hba->dev_cmd.complete) {
				ufshcd_add_command_trace(hba, index,
						"dev_complete");
				complete(hba->dev_cmd.complete);
			}
		}
		if (ufshcd_is_clkscaling_supported(hba))
			hba->clk_scaling.active_reqs--;

		lrbp->compl_time_stamp = ktime_get();
	}

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	ufshcd_clk_scaling_update_busy(hba);
}
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	unsigned long completed_reqs;
	u32 tr_doorbell;

	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	if (completed_reqs) {
		__ufshcd_transfer_req_compl(hba, completed_reqs);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}
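/*
 * Worked example for the XOR above: if outstanding_reqs is 0b1011 (tags
 * 0, 1 and 3 issued) and the doorbell now reads 0b1000 (only tag 3
 * still pending), then
 *
 *	completed_reqs = 0b1000 ^ 0b1011 = 0b0011
 *
 * i.e. tags 0 and 1 have completed; __ufshcd_transfer_req_compl() then
 * clears exactly those bits via "hba->outstanding_reqs ^= completed_reqs".
 */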
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= MASK_EE_STATUS;
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}
/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= MASK_EE_STATUS;
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}
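/*
 * Worked example of the exception event mask arithmetic (the extra
 * event mask is illustrative): with ee_ctrl_mask = 0x04
 * (MASK_EE_URGENT_BKOPS already set), enabling an additional event
 * whose mask is 0x10 computes
 *
 *	val = (0x04 | 0x10) & MASK_EE_STATUS = 0x14
 *
 * and writes it to the wExceptionEventControl attribute; the shadow
 * copy in hba->ee_ctrl_mask is only updated once the query succeeds.
 */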
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}
/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to "status" argument passed to
 * this function, disable otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
			     enum bkops_status status)
{
	int err;
	u32 curr_status = 0;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
				__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
 * and negative error value for any other failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
				__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
				__func__, err);
}
/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	scsi_block_requests(hba->host);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;

	if (status & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

out:
	scsi_unblock_requests(hba->host);
	pm_runtime_put_sync(hba->dev);
	return;
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
}
/**
 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 *				to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Returns true if error handling is required, false otherwise
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err) {
			err_handling = false;
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;
	bool needs_reset = false;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		goto out;

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret)
			goto skip_err_handling;
	}
	if ((hba->saved_err & INT_FATAL_ERRORS) ||
	    (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
	    ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
		needs_reset = true;

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared during
	 * host reset and restore
	 */
	if (needs_reset)
		goto skip_pending_xfer_clear;

	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, tag)) {
			err_xfer = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		if (ufshcd_clear_tm_cmd(hba, tag)) {
			err_tm = true;
			goto lock_skip_pending_xfer_clear;
		}
	}

lock_skip_pending_xfer_clear:
	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba);

	if (err_xfer || err_tm)
		needs_reset = true;

skip_pending_xfer_clear:
	/* Fatal errors need reset */
	if (needs_reset) {
		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;

		/*
		 * ufshcd_reset_and_restore() does the link reinitialization
		 * which will need at least one empty doorbell slot to send the
		 * device management commands (NOP and query commands).
		 * If there is no slot empty at this moment then free up last
		 * slot forcefully.
		 */
		if (hba->outstanding_reqs == max_doorbells)
			__ufshcd_transfer_req_compl(hba,
						    (1UL << (hba->nutrs - 1)));

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}

skip_err_handling:
	if (!needs_reset) {
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}

	ufshcd_clear_eh_in_progress(hba);

out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
}
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;
	irqreturn_t retval = IRQ_NONE;

	/* PHY layer lane error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	/* Ignore LINERESET indication, as this is not an error */
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
		ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
		retval |= IRQ_HANDLED;
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);

		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
		else if (hba->dev_quirks &
				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if ((reg & UIC_DME_ERROR) &&
	    (reg & UIC_DME_ERROR_CODE_MASK)) {
		ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
		retval |= IRQ_HANDLED;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
	return retval;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;

	if (hba->errors & INT_FATAL_ERRORS) {
		ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
		queue_eh_work = true;
	}

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
		dev_err(hba->dev,
			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
			"Enter" : "Exit",
			hba->errors, ufshcd_get_upmcrs(hba));
		ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
				       hba->errors);
		queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			ufshcd_scsi_block_requests(hba);

			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;

			/* dump controller state before resetting */
			if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
				bool pr_prdt = !!(hba->saved_err &
						SYSTEM_BUS_FATAL_ERROR);

				dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);

				ufshcd_print_host_regs(hba);
				ufshcd_print_pwr_info(hba);
				ufshcd_print_tmrs(hba, hba->outstanding_tasks);
				ufshcd_print_trs(hba, hba->outstanding_reqs,
							pr_prdt);
			}
			schedule_work(&hba->eh_work);
		}
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	return retval;
}
struct ctm_info {
	struct ufs_hba	*hba;
	unsigned long	pending;
	unsigned int	ncpl;
};

static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
{
	struct ctm_info *const ci = priv;
	struct completion *c;

	WARN_ON_ONCE(reserved);
	if (test_bit(req->tag, &ci->pending))
		return true;
	ci->ncpl++;
	c = req->end_io_data;
	if (c)
		complete(c);
	return true;
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	struct request_queue *q = hba->tmf_queue;
	struct ctm_info ci = {
		.hba	 = hba,
		.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
	};

	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
	return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	hba->errors = UFSHCD_ERROR_MASK & intr_status;

	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if (hba->errors)
		retval |= ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	return retval;
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status = 0;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;
	int retries = hba->nutrs;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	while (intr_status && retries--) {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		if (intr_status)
			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	}

	if (enabled_intr_status && retval == IRQ_NONE &&
	    !ufshcd_eh_in_progress(hba)) {
		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (-, 0x%08x)\n",
					__func__,
					intr_status,
					enabled_intr_status);
		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	}

	spin_unlock(hba->host->host_lock);
	return retval;
}
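/*
 * Illustrative trace for the re-read loop above: with two requests in
 * flight,
 *
 *	1. REG_INTERRUPT_STATUS shows UTP_TRANSFER_REQ_COMPL for tag 0
 *	2. the bit is cleared and the completion is handled
 *	3. tag 1 completes while step 2 runs
 *	4. re-reading REG_INTERRUPT_STATUS picks up the new completion,
 *	   which would otherwise stay pending until the next interrupt
 *
 * The loop is bounded by hba->nutrs, so a misbehaving controller cannot
 * wedge the handler.
 */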
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utmrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000, true);
out:
	return err;
}
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		struct utp_task_req_desc *treq, u8 tm_function)
{
	struct request_queue *q = hba->tmf_queue;
	struct Scsi_Host *host = hba->host;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	unsigned long flags;
	int task_tag, err;

	/*
	 * blk_get_request() is used here only to get a free tag.
	 */
	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = &wait;
	ufshcd_hold(hba, false);

	spin_lock_irqsave(host->host_lock, flags);
	blk_mq_start_request(req);

	task_tag = req->tag;
	treq->req_header.dword_0 |= cpu_to_be32(task_tag);

	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);

	/* send command to the controller */
	__set_bit(task_tag, &hba->outstanding_tasks);

	/* Make sure descriptors are ready before ringing the task doorbell */
	wmb();

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();

	spin_unlock_irqrestore(host->host_lock, flags);

	ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");

	/* wait until the task management command is completed */
	err = wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		/*
		 * Make sure that ufshcd_compl_tm() does not trigger a
		 * use-after-free.
		 */
		req->end_io_data = NULL;
		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, task_tag))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, task_tag);
		err = -ETIMEDOUT;
	} else {
		err = 0;
		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));

		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	__clear_bit(task_tag, &hba->outstanding_tasks);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	blk_put_request(req);

	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc treq = { { 0 }, };
	int ocs_value, err;

	/* Configure task request descriptor */
	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
				  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
	treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);

	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	treq.input_param1 = cpu_to_be32(lun_id);
	treq.input_param2 = cpu_to_be32(task_id);

	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
	if (err == -ETIMEDOUT)
		return err;

	ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
	if (ocs_value != OCS_SUCCESS)
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	else if (tm_response)
		*tm_response = be32_to_cpu(treq.output_param1) &
				MASK_TM_SERVICE_RESP;
	return err;
}
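/*
 * Usage sketch (illustrative; this mirrors what
 * ufshcd_eh_device_reset_handler() below actually does):
 *
 *	u8 resp = 0xF;
 *	int err;
 *
 *	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		the logical unit reset is treated as failed.
 */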
/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @cmd_type: specifies the type (NOP, Query...)
 * @desc_op: descriptor operation
 *
 * Those type of requests uses UTP Transfer Request Descriptor - utrd.
 * Therefore, it "rides" the device management infrastructure: uses its tag and
 * tasks work queues.
 *
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
					struct utp_upiu_req *req_upiu,
					struct utp_upiu_req *rsp_upiu,
					u8 *desc_buff, int *buff_len,
					enum dev_cmd_type cmd_type,
					enum query_opcode desc_op)
{
	struct request_queue *q = hba->cmd_queue;
	struct request *req;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	int tag;
	struct completion wait;
	unsigned long flags;
	u32 upiu_flags;

	down_read(&hba->clk_scaling_lock);

	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}
	tag = req->tag;
	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);

	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0;
	lrbp->intr_cmd = true;
	hba->dev_cmd.type = cmd_type;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
	case UFSHCI_VERSION_11:
		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
		break;
	default:
		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
		break;
	}

	/* update the task tag in the request upiu */
	req_upiu->header.dword_0 |= cpu_to_be32(tag);

	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);

	/* just copy the upiu request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
		/* The Data Segment Area is optional depending upon the query
		 * function value. for WRITE DESCRIPTOR, the data segment
		 * follows right after the tsf.
		 */
		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
		*buff_len = 0;
	}

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	hba->dev_cmd.complete = &wait;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * ignore the returning value here - ufshcd_check_query_response is
	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
	 * read the response directly ignoring all errors.
	 */
	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);

	/* just copy the upiu response as it is */
	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
		u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
			       MASK_QUERY_DATA_SEG_LEN;

		if (*buff_len >= resp_len) {
			memcpy(desc_buff, descp, resp_len);
			*buff_len = resp_len;
		} else {
			dev_warn(hba->dev,
				 "rsp size is bigger than buffer");
			*buff_len = 0;
			err = -EINVAL;
		}
	}

	blk_put_request(req);
out_unlock:
	up_read(&hba->clk_scaling_lock);
	return err;
}
/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @desc_op: descriptor operation
 *
 * Supports UTP Transfer requests (nop and query), and UTP Task
 * Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
 * be copied without any further input validations.
 */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op)
{
	int err;
	int cmd_type = DEV_CMD_TYPE_QUERY;
	struct utp_task_req_desc treq = { { 0 }, };
	int ocs_value;
	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;

	switch (msgcode) {
	case UPIU_TRANSACTION_NOP_OUT:
		cmd_type = DEV_CMD_TYPE_NOP;
		/* fall through */
	case UPIU_TRANSACTION_QUERY_REQ:
		ufshcd_hold(hba, false);
		mutex_lock(&hba->dev_cmd.lock);
		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
						   desc_buff, buff_len,
						   cmd_type, desc_op);
		mutex_unlock(&hba->dev_cmd.lock);
		ufshcd_release(hba);

		break;
	case UPIU_TRANSACTION_TASK_REQ:
		treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
		treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

		memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));

		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
		if (err == -ETIMEDOUT)
			break;

		ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
		if (ocs_value != OCS_SUCCESS) {
			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
				ocs_value);
			break;
		}

		memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));

		break;
	default:
		err = -EINVAL;

		break;
	}

	return err;
}
/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                  scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	u32 pos;
	int err;
	u8 resp = 0xF, lun;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);

	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);

out:
	hba->req_abort_count = 0;
	ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;
	lrbp = &hba->lrb[tag];
	if (!ufshcd_valid_tag(hba, tag)) {
		dev_err(hba->dev,
			"%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
			__func__, tag, cmd, cmd->request);
		BUG();
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal step we skip to the last error
	 * handling stage: reset and restore.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
		return ufshcd_eh_host_reset_handler(cmd);

	ufshcd_hold(hba, false);
	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		dev_err(hba->dev,
			"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
			__func__, tag, hba->outstanding_reqs, reg);
		goto out;
	}

	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	/* Print Transfer Request of aborted task */
	dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(hba->lrb[tag].cmd);
	if (!hba->req_abort_count) {
		ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_trs(hba, 1 << tag, true);
	} else {
		ufshcd_print_trs(hba, 1 << tag, false);
	}
	hba->req_abort_count++;

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		err = -EIO;
		goto out;
	}

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
				__func__, tag);
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
				__func__, tag);
			goto out;
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);
		goto out;
	}

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_outstanding_req_clear(hba, tag);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
	}

	/*
	 * This ufshcd_release() corresponds to the original scsi cmd that got
	 * aborted here (as we won't get any IRQ for it).
	 */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	unsigned long flags;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba, false);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba);
	hba->silence_err_logs = false;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, true);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	err = ufshcd_probe_hba(hba);

	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
		err = -EIO;
out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	int retries = MAX_HOST_RESET_RETRIES;

	do {
		/* Reset the attached device */
		ufshcd_vops_device_reset(hba);

		err = ufshcd_host_reset_and_restore(hba);
	} while (err && --retries);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	ufshcd_hold(hba, false);
	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
			    hba->ufshcd_state == UFSHCD_STATE_RESET ||
			    hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Returns calculated max ICC level for specific regulator
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = be16_to_cpup((__be16 *)&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
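
/*
 * Note on the descriptor format decoded above: each active ICC level entry is
 * a 16-bit big-endian value whose unit field (ATTR_ICC_LVL_UNIT_MASK) selects
 * nano/micro/milli/amp scaling for the value field. The switch normalizes
 * every entry to micro-amps so it can be compared directly against the
 * regulator's supported current, and the scan walks from the highest level
 * downward, returning the first level whose draw the regulator can satisfy.
 */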
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns calculated ICC level
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
							u8 *desc_buf, int len)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		dev_err(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
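
/*
 * The chaining above is deliberate: the level found for VCC is passed as the
 * start_scan row for the VCCQ lookup, and that result in turn seeds the VCCQ2
 * lookup, so the final icc_level is the highest level that all three rails
 * can support simultaneously.
 */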
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
	int buff_len = hba->desc_size.pwr_desc;
	u8 *desc_buf;

	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf)
		return;

	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor.len = %d ret = %d",
			__func__, buff_len, ret);
		goto out;
	}

	hba->init_prefetch_data.icc_level =
			ufshcd_find_max_sup_active_icc_level(hba,
			desc_buf, buff_len);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
		&hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);

out:
	kfree(desc_buf);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when its sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_rpmb;
	struct scsi_device *sdev_boot;

	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->sdev_ufs_device)) {
		ret = PTR_ERR(hba->sdev_ufs_device);
		hba->sdev_ufs_device = NULL;
		goto out;
	}
	scsi_device_put(hba->sdev_ufs_device);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_sdev_ufs_device;
	}
	scsi_device_put(sdev_rpmb);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot))
		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
	else
		scsi_device_put(sdev_boot);
	goto out;

remove_sdev_ufs_device:
	scsi_remove_device(hba->sdev_ufs_device);
out:
	return ret;
}
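
/*
 * Note that a missing BOOT W-LU is only logged above, not treated as fatal:
 * the SSU-based power management path depends on the "UFS Device" W-LU and
 * RPMB access depends on the "RPMB" W-LU, but the driver can operate without
 * a BOOT well known logical unit.
 */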
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	if (!dev_desc)
		return -EINVAL;

	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
	err = ufshcd_read_string_desc(hba, model_index,
				      &dev_desc->model, SD_ASCII_STD);
	if (err < 0) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * ufshcd_read_string_desc returns size of the string,
	 * so reset the error value
	 */
	err = 0;

out:
	kfree(desc_buf);
	return err;
}
static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
{
	kfree(dev_desc->model);
	dev_desc->model = NULL;
}
static void ufs_fixup_device_setup(struct ufs_hba *hba,
				   struct ufs_dev_desc *dev_desc)
{
	struct ufs_dev_fix *f;

	for (f = ufs_fixups; f->quirk; f++) {
		if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
		    ((dev_desc->model &&
		      STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
/**
 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
 * @hba: per-adapter instance
 *
 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 * 1.61. PA_TActivate needs to be greater than or equal to peer M-PHY's
 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
 * the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_min_activatetime);
	if (ret)
		goto out;

	/* make sure proper unit conversion is applied */
	tuned_pa_tactivate =
		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
		 / PA_TACTIVATE_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
			     tuned_pa_tactivate);

out:
	return ret;
}
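
/*
 * Worked example of the conversion above, assuming the usual unipro.h unit
 * definitions (RX_MIN_ACTIVATETIME_UNIT_US = 100 us per step and
 * PA_TACTIVATE_TIME_UNIT_US = 10 us per step): a peer capability value of 2
 * means 200 us, which programs PA_TACTIVATE = (2 * 100) / 10 = 20 units.
 */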
/**
 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
 * @hba: per-adapter instance
 *
 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less
 * than 1.61. PA_Hibern8Time needs to be the maximum of local M-PHY's
 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
 * This optimal value can help reduce the hibern8 exit latency.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
	int ret = 0;
	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
	u32 max_hibern8_time, tuned_pa_hibern8time;

	ret = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &local_tx_hibern8_time_cap);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba,
				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
				  &peer_rx_hibern8_time_cap);
	if (ret)
		goto out;

	max_hibern8_time = max(local_tx_hibern8_time_cap,
			       peer_rx_hibern8_time_cap);
	/* make sure proper unit conversion is applied */
	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
				/ PA_HIBERN8_TIME_UNIT_US);
	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
			     tuned_pa_hibern8time);
out:
	return ret;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
 * for such devices.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
			     &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			       gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us > peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
					gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
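
/*
 * Worked example for the quirk handling above: PA_TACTIVATE is expressed in
 * PA_GRANULARITY steps, converted to microseconds via gran_to_us_table[].
 * With host granularity 3 (8 us/step) and PA_TACTIVATE = 4, the host needs
 * 32 us; if the peer uses granularity 2 (4 us/step) with PA_TACTIVATE = 6
 * (24 us), the peer value is bumped to 32 / 4 + 1 = 9 steps (36 us) so the
 * device activate time stays strictly above the host's.
 */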
static void ufshcd_tune_unipro_params(struct ufs_hba *hba,
				      struct ufs_dev_desc *card)
{
	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
		ufshcd_tune_pa_tactivate(hba);
		ufshcd_tune_pa_hibern8time(hba);
	}

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);

	ufshcd_vops_apply_dev_quirks(hba, card);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
		&hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
		&hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
		&hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
		&hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
		&hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
		&hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
		&hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
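
/*
 * Each read above falls back to the fixed default size from the UFS spec if
 * the device cannot report its descriptor length, so every later descriptor
 * read starts from a sane buffer length.
 */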
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};

static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}
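
/*
 * ufs_ref_clk_freqs[] doubles as the translation table between a raw clock
 * rate and the bRefClkFreq attribute encoding. For example (a hypothetical
 * caller, not code from this driver):
 *
 *	hba->dev_ref_clk_freq = ufs_get_bref_clk_from_hz(19200000);
 *
 * yields REF_CLK_FREQ_19_2_MHZ; any rate outside the four frequencies
 * defined by the UFS spec maps to REF_CLK_FREQ_INVAL.
 */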
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
			"invalid ref_clk setting = %ld\n", freq);
}
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);

	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);

	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
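
/*
 * Note that hba->dev_ref_clk_freq stores the enum encoding rather than a
 * rate in Hz, which is why the messages above index ufs_ref_clk_freqs[freq]
 * to print the human-readable frequency.
 */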
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize
 * @hba: per-adapter instance
 *
 * Execute link-startup and verify device initialization
 */
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;
	ktime_t start = ktime_get();

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	/* set the default level for urgent bkops */
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufs_fixup_device_setup(hba, &card);
	ufshcd_tune_unipro_params(hba, &card);
	ufs_put_device_desc(&card);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);
	hba->wlun_dev_clr_ua = true;

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		/*
		 * Set the right value to bRefClkFreq before attempting to
		 * switch to HS gears.
		 */
		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
			ufshcd_set_dev_ref_clk(hba);
		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
					__func__, ret);
			goto out;
		}
	}

	/* set the state as operational after switching to desired gear */
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
	 */
	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		bool flag;

		/* clear any previous UFS device information */
		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
		if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
			hba->dev_info.f_power_on_wp_en = flag;

		if (!hba->is_init_prefetch)
			ufshcd_init_icc_levels(hba);

		/* Add required well known logical units to scsi mid layer */
		ret = ufshcd_scsi_add_wlus(hba);
		if (ret)
			goto out;

		/* Initialize devfreq after UFS device is detected */
		if (ufshcd_is_clkscaling_supported(hba)) {
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));
			hba->clk_scaling.saved_pwr_info.is_valid = true;
			if (!hba->devfreq) {
				ret = ufshcd_devfreq_init(hba);
				if (ret)
					goto out;
			}
			hba->clk_scaling.is_allowed = true;
		}

		scsi_scan_host(hba->host);
		pm_runtime_put_sync(hba->dev);
	}

	if (!hba->is_init_prefetch)
		hba->is_init_prefetch = true;

out:
	/*
	 * If we failed to initialize the device or the device is not
	 * present, turn off the power/clocks etc.
	 */
	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
		pm_runtime_put_sync(hba->dev);
		ufshcd_exit_clk_scaling(hba);
		ufshcd_hba_exit(hba);
	}

	trace_ufshcd_init(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
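
/*
 * The pm_runtime_put_sync() calls above balance the pm_runtime_get_sync()
 * taken in ufshcd_init() before the async scan was scheduled: once the scan
 * finishes (or fails), runtime PM is allowed to suspend the adapter again.
 */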
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;

	ufshcd_probe_hba(hba);
}
static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int index;
	bool found = false;

	if (!scmd || !scmd->device || !scmd->device->host)
		return BLK_EH_DONE;

	host = scmd->device->host;
	hba = shost_priv(host);
	if (!hba)
		return BLK_EH_DONE;

	spin_lock_irqsave(host->host_lock, flags);

	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[index].cmd == scmd) {
			found = true;
			break;
		}
	}

	spin_unlock_irqrestore(host->host_lock, flags);

	/*
	 * Bypass SCSI error handling and reset the block layer timer if this
	 * SCSI command was not actually dispatched to UFS driver, otherwise
	 * let SCSI layer handle the error as usual.
	 */
	return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
	NULL,
};

static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.sdev_groups		= ufshcd_driver_groups,
	.dma_boundary		= PAGE_SIZE - 1,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
				   int ua)
{
	int ret;

	if (!vreg)
		return 0;

	/*
	 * "set_load" operation shall be required on those regulators
	 * which have a specifically configured current limitation. Otherwise
	 * zero max_uA may cause unexpected behavior when regulator is
	 * enabled or set as high power mode.
	 */
	if (!vreg->max_uA)
		return 0;

	ret = regulator_set_load(vreg->reg, ua);
	if (ret < 0) {
		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
				__func__, vreg->name, ua, ret);
	}

	return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}

static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
{
	if (!vreg)
		return 0;

	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
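
/*
 * LPM programs the fixed low-power load UFS_VREG_LPM_LOAD_UA on a rail,
 * while HPM restores the rail's full max_uA; both funnel through
 * ufshcd_config_vreg_load(), which is a no-op for rails without a
 * configured current limit.
 */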
static int ufshcd_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	const char *name;
	int min_uV, uA_load;

	BUG_ON(!vreg);

	reg = vreg->reg;
	name = vreg->name;

	if (regulator_count_voltages(reg) > 0) {
		if (vreg->min_uV && vreg->max_uV) {
			min_uV = on ? vreg->min_uV : 0;
			ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
			if (ret) {
				dev_err(dev,
					"%s: %s set voltage failed, err=%d\n",
					__func__, name, ret);
				goto out;
			}
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
		if (ret)
			goto out;
	}
out:
	return ret;
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufshcd_config_vreg(dev, vreg, true);
	if (!ret)
		ret = regulator_enable(vreg->reg);

	if (!ret)
		vreg->enabled = true;
	else
		dev_err(dev, "%s: %s enable failed, err=%d\n",
				__func__, vreg->name, ret);
out:
	return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufshcd_config_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
	if (ret)
		goto out;

	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
out:
	if (ret) {
		ufshcd_toggle_vreg(dev, info->vccq2, false);
		ufshcd_toggle_vreg(dev, info->vccq, false);
		ufshcd_toggle_vreg(dev, info->vcc, false);
	}
	return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg)
		goto out;

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
				__func__, vreg->name, ret);
	}
out:
	return ret;
}
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
	int ret = 0;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	ret = ufshcd_get_vreg(dev, info->vcc);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq);
	if (ret)
		goto out;

	ret = ufshcd_get_vreg(dev, info->vccq2);
out:
	return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;

	return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	unsigned long flags;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	/*
	 * vendor specific setup_clocks ops may depend on clocks managed by
	 * this standard driver hence call the vendor specific setup_clocks
	 * before disabling the clocks managed here.
	 */
	if (!on) {
		ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
		if (ret)
			return ret;
	}

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
				continue;

			clk_state_changed = on ^ clki->enabled;
			if (on && !clki->enabled) {
				ret = clk_prepare_enable(clki->clk);
				if (ret) {
					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
						__func__, clki->name, ret);
					goto out;
				}
			} else if (!on && clki->enabled) {
				clk_disable_unprepare(clki->clk);
			}
			clki->enabled = on;
			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
					clki->name, on ? "en" : "dis");
		}
	}

	/*
	 * vendor specific setup_clocks ops may depend on clocks managed by
	 * this standard driver hence call the vendor specific setup_clocks
	 * after enabling the clocks managed here.
	 */
	if (on) {
		ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
		if (ret)
			goto out;
	}

out:
	if (ret) {
		list_for_each_entry(clki, head, list) {
			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
				clk_disable_unprepare(clki->clk);
		}
	} else if (!ret && on) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	if (clk_state_changed)
		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
			(on ? "on" : "off"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
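
/*
 * The skip_ref_clk parameter exists for the suspend path: when the UniPro
 * link is kept active, the device's reference clock must keep running, so
 * ufshcd_suspend() gates every clock except "ref_clk" by calling
 * __ufshcd_setup_clocks(hba, false, true).
 */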
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}

static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		/*
		 * Parse device ref clk freq as per device tree "ref_clk".
		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
		 * in ufshcd_alloc_host().
		 */
		if (!strcmp(clki->name, "ref_clk"))
			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails as it will help controlling the UFS host controller power
	 * collapse easily which is different than UFS device power collapse.
	 * Also, enable the host controller power before we go ahead with rest
	 * of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_suspend_clkscaling(hba);
		if (ufshcd_is_clkscaling_supported(hba))
			if (hba->devfreq)
				ufshcd_suspend_clkscaling(hba);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				UFS_SENSE_SIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
			UFS_SENSE_SIZE, NULL, NULL,
			msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				   enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) == DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || !hba->auto_bkops_enabled)) {
		/*
		 * Let's make sure that link is in low power mode, we are doing
		 * this currently by putting the link in Hibern8. The other way
		 * to put the link in low power mode is to send the DME end
		 * point reset to the device and then send the DME reset
		 * command to local unipro. But putting the link in hibern8 is
		 * much faster.
		 */
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret)
			goto out;
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba, true);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}
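
/*
 * check_for_bkops matters only for the link-off transition: the suspend path
 * passes 1 so that an active auto-bkops session keeps the link (and hence
 * the device) powered, while rollback paths pass 0 to force the requested
 * state unconditionally.
 */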
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * It seems some UFS devices may keep drawing more than sleep current
	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
	 * To avoid this situation, add 2ms delay before putting these UFS
	 * rails in LPM mode.
	 */
	if (!ufshcd_is_link_active(hba) &&
	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
		usleep_range(2000, 2100);

	/*
	 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
	 * some power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is
	 * anyway in low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba,
						     hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}
/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link is powered off.
 *
 * NOTE: UFS device & link must be active before we enter in this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (hba->clk_scaling.is_allowed) {
		cancel_work_sync(&hba->clk_scaling.suspend_work);
		cancel_work_sync(&hba->clk_scaling.resume_work);
		ufshcd_suspend_clkscaling(hba);
	}

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE)
		goto disable_clks;

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto enable_gating;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto enable_gating;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	    ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	     !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space call them before the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op);
	if (ret)
		goto set_link_active;

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

set_link_active:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	if (ret)
		ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
	return ret;
}
/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	hba->clk_gating.is_suspended = false;

	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);

	/* Enable Auto-Hibernate if configured */
	ufshcd_auto_hibern8_enable(hba);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);

	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	if (hba->clk_scaling.is_allowed)
		ufshcd_suspend_clkscaling(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	if (ret)
		ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
	return ret;
}
/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba || !hba->is_powered)
		return 0;

	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
	     hba->curr_dev_pwr_mode) &&
	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
	     hba->uic_link_state))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different than what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes longer time, we might have to optimize
		 * it in the future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		goto out;
	else
		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
out:
	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
		hba->is_sys_suspended = false;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		goto out;
	else
		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
out:
	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. Following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So following would be the possible power state after this function return
 * successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	int ret = 0;
	ktime_t start = ktime_get();

	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		goto out;
	else
		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
out:
	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
		ktime_to_us(ktime_sub(ktime_get(), start)),
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_powered)
		goto out;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	pm_runtime_get_sync(hba->dev);

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	ufs_bsg_remove(hba);
	ufs_sysfs_remove_nodes(hba->dev);
	blk_cleanup_queue(hba->tmf_queue);
	blk_mq_free_tag_set(&hba->tmf_tag_set);
	blk_cleanup_queue(hba->cmd_queue);
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba, true);

	ufshcd_exit_clk_scaling(hba);
	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_supported(hba))
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;

	INIT_LIST_HEAD(&hba->clk_list_head);

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *qd)
{
	return BLK_STS_NOTSUPP;
}

static const struct blk_mq_ops ufshcd_tmf_ops = {
	.queue_rq = ufshcd_queue_tmf,
};
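
/*
 * The tag set built from these ops in ufshcd_init() is sized by hba->nutmrs:
 * the TMF queue never dispatches requests (queue_rq is a stub); it exists
 * purely so the driver can borrow blk-mq's tag allocator to hand out task
 * management slot numbers.
 */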
/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	/*
	 * dev_set_drvdata() must be called before any callbacks are registered
	 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
	 * sysfs).
	 */
	dev_set_drvdata(dev, hba);

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
	    (hba->ufs_version != UFSHCI_VERSION_11) &&
	    (hba->ufs_version != UFSHCI_VERSION_20) &&
	    (hba->ufs_version != UFSHCI_VERSION_21))
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->ufs_version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = UFS_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	init_rwsem(&hba->clk_scaling_lock);

	ufshcd_init_clk_gating(hba);

	ufshcd_init_clk_scaling(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
	/*
	 * Make sure that UFS interrupts are disabled and any pending interrupt
	 * status is cleared before registering UFS interrupt handler.
	 */
	mb();

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
	if (IS_ERR(hba->cmd_queue)) {
		err = PTR_ERR(hba->cmd_queue);
		goto out_remove_scsi_host;
	}

	hba->tmf_tag_set = (struct blk_mq_tag_set) {
		.nr_hw_queues	= 1,
		.queue_depth	= hba->nutmrs,
		.ops		= &ufshcd_tmf_ops,
		.flags		= BLK_MQ_F_NO_SCHED,
	};
	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
	if (err < 0)
		goto free_cmd_queue;
	hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
	if (IS_ERR(hba->tmf_queue)) {
		err = PTR_ERR(hba->tmf_queue);
		goto free_tmf_tag_set;
	}

	/* Reset the attached device */
	ufshcd_vops_device_reset(hba);

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		ufshcd_print_host_regs(hba);
		ufshcd_print_host_state(hba);
		goto free_tmf_queue;
	}

	/*
	 * Set the default power management level for runtime and system PM.
	 * Default power saving mode is to keep UFS link in Hibern8 state
	 * and UFS device in sleep state.
	 */
	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);
	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
						UFS_SLEEP_PWR_MODE,
						UIC_LINK_HIBERN8_STATE);

	/* Set the default auto-hibernate idle timer value to 150 ms */
	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);
	atomic_set(&hba->scsi_block_reqs_cnt, 0);
	/*
	 * We are assuming that device wasn't put in sleep/power-down
	 * state exclusively during the boot stage before kernel.
	 * This assumption helps avoid doing link startup twice during
	 * ufshcd_probe_hba().
	 */
	ufshcd_set_ufs_dev_active(hba);

	async_schedule(ufshcd_async_scan, hba);
	ufs_sysfs_add_nodes(hba->dev);

	return 0;

free_tmf_queue:
	blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
	blk_mq_free_tag_set(&hba->tmf_tag_set);
free_cmd_queue:
	blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_scaling(hba);
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);