/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
	({ \
		int _ret; \
		if (_on) \
			_ret = ufshcd_enable_vreg(_dev, _vreg); \
		else \
			_ret = ufshcd_disable_vreg(_dev, _vreg); \
		_ret; \
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do { \
	size_t __len = (len); \
	print_hex_dump(KERN_ERR, prefix_str, \
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false); \
} while (0)

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
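
/*
 * Note: callers pass a register window that is a multiple of 4 bytes; for
 * example, ufshcd_print_host_regs() below dumps the whole UFSHCI register
 * space with ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ").
 */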

enum {
	UFSHCD_MAX_CHANNEL = 0,
	UFSHCD_MAX_ID = 1,
	UFSHCD_CMD_PER_LUN = 32,
	UFSHCD_CAN_QUEUE = 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
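
/*
 * For example (reading the table above), power-management level 3 pairs
 * UFS_SLEEP_PWR_MODE for the device with UIC_LINK_HIBERN8_STATE for the
 * link; the helpers below translate a level into each of the two states.
 */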

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};

static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}
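
/*
 * scsi_block_reqs_cnt is a nesting counter: the SCSI midlayer is blocked on
 * the first ufshcd_scsi_block_requests() call and unblocked only when the
 * last matching ufshcd_scsi_unblock_requests() drops the count back to zero.
 */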

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
		const char *str)
{
	int off = (int)tag - hba->nutrs;
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			&descp->input_param1);
}

static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace UPIU W/O tracing command */
		if (lrbp->cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (lrbp->cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				  lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_err_hist(struct ufs_hba *hba,
				  struct ufs_err_reg_hist *err_hist,
				  char *err_name)
{
	int i;
	bool found = false;

	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s errors\n", err_name);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
			      "auto_hibern8_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
			      "link_startup_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
			      "suspend_fail");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");

	ufshcd_print_clk_freqs(hba);

	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
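
/*
 * Illustrative (not taken from a specific caller): polling a transfer
 * request doorbell bit until the controller clears it might look like
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 100, 1000, true);
 *
 * The register, mask and timeout values depend entirely on the caller.
 */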

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos),
				REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);

	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}
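
/*
 * ufshcd_clock_scaling_prepare() and ufshcd_clock_scaling_unprepare() bracket
 * a scaling operation: new requests are blocked, clk_scaling_lock is taken
 * for write and the doorbells are drained before clocks or gear are changed;
 * everything is released again in ufshcd_clock_scaling_unprepare().
 */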

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}

static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
	int *busy = priv;

	WARN_ON_ONCE(reserved);
	(*busy)++;
	return false;
}

/* Whether or not any tag is in use by a request that is in progress. */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
	struct request_queue *q = hba->cmd_queue;
	int busy = 0;

	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
	return busy;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	devfreq = devfreq_add_device(hba->dev,
			&ufs_devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		/* fallthrough */
	case CLKS_OFF:
		ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		/* fallthrough */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
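
/*
 * Callers pair ufshcd_hold() with ufshcd_release(); a minimal sketch of a
 * hypothetical caller that blocks until the clocks are ungated:
 *
 *	ufshcd_hold(hba, false);
 *	... access host registers / issue commands ...
 *	ufshcd_release(hba);
 *
 * Passing async == true instead returns -EAGAIN rather than sleeping while
 * the ungate work is still pending.
 */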

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}

static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	ufshcd_clkscaling_init_sysfs(hba);
}

static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
							   WQ_MEM_RECLAIM);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1796{
1797 if (!ufshcd_is_clkgating_allowed(hba))
1798 return;
1799 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
b427411a 1800 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
97cd6805
AM
1801 cancel_work_sync(&hba->clk_gating.ungate_work);
1802 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
10e5e375 1803 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1ab27c9c
ST
1804}
1805
856b3483
ST
1806/* Must be called with host lock acquired */
1807static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1808{
401f1e44
SJ
1809 bool queue_resume_work = false;
1810
fcb0c4b0 1811 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1812 return;
1813
401f1e44
SJ
1814 if (!hba->clk_scaling.active_reqs++)
1815 queue_resume_work = true;
1816
1817 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1818 return;
1819
1820 if (queue_resume_work)
1821 queue_work(hba->clk_scaling.workq,
1822 &hba->clk_scaling.resume_work);
1823
1824 if (!hba->clk_scaling.window_start_t) {
1825 hba->clk_scaling.window_start_t = jiffies;
1826 hba->clk_scaling.tot_busy_t = 0;
1827 hba->clk_scaling.is_busy_started = false;
1828 }
1829
856b3483
ST
1830 if (!hba->clk_scaling.is_busy_started) {
1831 hba->clk_scaling.busy_start_t = ktime_get();
1832 hba->clk_scaling.is_busy_started = true;
1833 }
1834}
1835
1836static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1837{
1838 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1839
fcb0c4b0 1840 if (!ufshcd_is_clkscaling_supported(hba))
856b3483
ST
1841 return;
1842
1843 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1844 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1845 scaling->busy_start_t));
8b0e1953 1846 scaling->busy_start_t = 0;
856b3483
ST
1847 scaling->is_busy_started = false;
1848 }
1849}
7a3e97b0
SY
1850/**
1851 * ufshcd_send_command - Send SCSI or device management commands
1852 * @hba: per adapter instance
1853 * @task_tag: Task tag of the command
1854 */
1855static inline
1856void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1857{
ff8e20c6 1858 hba->lrb[task_tag].issue_time_stamp = ktime_get();
09017188 1859 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
856b3483 1860 ufshcd_clk_scaling_start_busy(hba);
7a3e97b0 1861 __set_bit(task_tag, &hba->outstanding_reqs);
b873a275 1862 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
ad1a1b9c
GB
1863 /* Make sure that doorbell is committed immediately */
1864 wmb();
1a07f2d9 1865 ufshcd_add_command_trace(hba, task_tag, "send");
7a3e97b0
SY
1866}
1867
1868/**
1869 * ufshcd_copy_sense_data - Copy sense data in case of check condition
8aa29f19 1870 * @lrbp: pointer to local reference block
7a3e97b0
SY
1871 */
1872static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1873{
1874 int len;
1c2623c5
SJ
1875 if (lrbp->sense_buffer &&
1876 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
e3ce73d6
YG
1877 int len_to_copy;
1878
5a0b0cb9 1879 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
09a5a24f 1880 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
e3ce73d6 1881
09a5a24f
AA
1882 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1883 len_to_copy);
7a3e97b0
SY
1884 }
1885}
1886
68078d5c
DR
1887/**
1888 * ufshcd_copy_query_response() - Copy the Query Response and the data
1889 * descriptor
1890 * @hba: per adapter instance
8aa29f19 1891 * @lrbp: pointer to local reference block
68078d5c
DR
1892 */
1893static
c6d4a831 1894int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
68078d5c
DR
1895{
1896 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1897
68078d5c 1898 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
68078d5c 1899
68078d5c 1900 /* Get the descriptor */
1c90836f
AA
1901 if (hba->dev_cmd.query.descriptor &&
1902 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
d44a5f98 1903 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
68078d5c 1904 GENERAL_UPIU_REQUEST_SIZE;
c6d4a831
DR
1905 u16 resp_len;
1906 u16 buf_len;
68078d5c
DR
1907
1908 /* data segment length */
c6d4a831 1909 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
68078d5c 1910 MASK_QUERY_DATA_SEG_LEN;
ea2aab24
SRT
1911 buf_len = be16_to_cpu(
1912 hba->dev_cmd.query.request.upiu_req.length);
c6d4a831
DR
1913 if (likely(buf_len >= resp_len)) {
1914 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1915 } else {
1916 dev_warn(hba->dev,
3d4881d1
BH
1917 "%s: rsp size %d is bigger than buffer size %d",
1918 __func__, resp_len, buf_len);
c6d4a831
DR
1919 return -EINVAL;
1920 }
68078d5c 1921 }
c6d4a831
DR
1922
1923 return 0;
68078d5c
DR
1924}
1925
7a3e97b0
SY
1926/**
1927 * ufshcd_hba_capabilities - Read controller capabilities
1928 * @hba: per adapter instance
1929 */
1930static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1931{
b873a275 1932 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
7a3e97b0
SY
1933
1934 /* nutrs and nutmrs are 0 based values */
1935 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1936 hba->nutmrs =
1937 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1938}
1939
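/*
 * Worked example with an illustrative capabilities value: if
 * REG_CONTROLLER_CAPABILITIES reads back 0x0707001f, then
 *   nutrs  = (0x0707001f & MASK_TRANSFER_REQUESTS_SLOTS) + 1              = 0x1f + 1 = 32
 *   nutmrs = ((0x0707001f & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1 = 7 + 1  = 8
 * i.e. the host exposes 32 transfer request slots and 8 task management slots.
 */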
1940/**
6ccf44fe
SJ
1941 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1942 * to accept UIC commands
7a3e97b0 1943 * @hba: per adapter instance
6ccf44fe
SJ
1944 * Return true on success, else false
1945 */
1946static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1947{
1948 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1949 return true;
1950 else
1951 return false;
1952}
1953
53b3d9c3
SJ
1954/**
1955 * ufshcd_get_upmcrs - Get the power mode change request status
1956 * @hba: Pointer to adapter instance
1957 *
1958 * This function gets the UPMCRS field of HCS register
1959 * Returns value of UPMCRS field
1960 */
1961static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1962{
1963 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1964}
1965
6ccf44fe
SJ
1966/**
1967 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1968 * @hba: per adapter instance
1969 * @uic_cmd: UIC command
1970 *
1971 * Mutex must be held.
7a3e97b0
SY
1972 */
1973static inline void
6ccf44fe 1974ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
7a3e97b0 1975{
6ccf44fe
SJ
1976 WARN_ON(hba->active_uic_cmd);
1977
1978 hba->active_uic_cmd = uic_cmd;
1979
7a3e97b0 1980 /* Write Args */
6ccf44fe
SJ
1981 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
1982 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
1983 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
7a3e97b0
SY
1984
1985 /* Write UIC Cmd */
6ccf44fe 1986 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
b873a275 1987 REG_UIC_COMMAND);
7a3e97b0
SY
1988}
1989
6ccf44fe
SJ
1990/**
1991 * ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
1992 * @hba: per adapter instance
8aa29f19 1993 * @uic_cmd: UIC command
6ccf44fe
SJ
1994 *
1995 * Must be called with mutex held.
1996 * Returns 0 only if success.
1997 */
1998static int
1999ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2000{
2001 int ret;
2002 unsigned long flags;
2003
2004 if (wait_for_completion_timeout(&uic_cmd->done,
2005 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2006 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2007 else
2008 ret = -ETIMEDOUT;
2009
2010 spin_lock_irqsave(hba->host->host_lock, flags);
2011 hba->active_uic_cmd = NULL;
2012 spin_unlock_irqrestore(hba->host->host_lock, flags);
2013
2014 return ret;
2015}
2016
2017/**
2018 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2019 * @hba: per adapter instance
2020 * @uic_cmd: UIC command
d75f7fe4 2021 * @completion: initialize the completion only if this is set to true
6ccf44fe
SJ
2022 *
2023 * Identical to ufshcd_send_uic_cmd() except mutex. Must be called
57d104c1 2024 * with mutex held and host_lock locked.
6ccf44fe
SJ
2025 * Returns 0 only if success.
2026 */
2027static int
d75f7fe4
YG
2028__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2029 bool completion)
6ccf44fe 2030{
6ccf44fe
SJ
2031 if (!ufshcd_ready_for_uic_cmd(hba)) {
2032 dev_err(hba->dev,
2033 "Controller not ready to accept UIC commands\n");
2034 return -EIO;
2035 }
2036
d75f7fe4
YG
2037 if (completion)
2038 init_completion(&uic_cmd->done);
6ccf44fe 2039
6ccf44fe 2040 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
6ccf44fe 2041
57d104c1 2042 return 0;
6ccf44fe
SJ
2043}
2044
2045/**
2046 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2047 * @hba: per adapter instance
2048 * @uic_cmd: UIC command
2049 *
2050 * Returns 0 only if success.
2051 */
e77044c5 2052int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
6ccf44fe
SJ
2053{
2054 int ret;
57d104c1 2055 unsigned long flags;
6ccf44fe 2056
1ab27c9c 2057 ufshcd_hold(hba, false);
6ccf44fe 2058 mutex_lock(&hba->uic_cmd_mutex);
cad2e03d
YG
2059 ufshcd_add_delay_before_dme_cmd(hba);
2060
57d104c1 2061 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 2062 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
57d104c1
SJ
2063 spin_unlock_irqrestore(hba->host->host_lock, flags);
2064 if (!ret)
2065 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2066
6ccf44fe
SJ
2067 mutex_unlock(&hba->uic_cmd_mutex);
2068
1ab27c9c 2069 ufshcd_release(hba);
6ccf44fe
SJ
2070 return ret;
2071}
2072
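/*
 * A minimal sketch of issuing a raw UIC command through the helper above;
 * real callers usually go through the DME attribute wrappers instead. The
 * function name and the choice of PA_ACTIVETXDATALANES are illustrative
 * assumptions made for the example.
 */
static int example_read_active_tx_lanes(struct ufs_hba *hba, u32 *lanes)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_ACTIVETXDATALANES);

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret)
		*lanes = uic_cmd.argument3;	/* DME_GET result is in argument3 */

	return ret;
}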
7a3e97b0
SY
2073/**
2074 * ufshcd_map_sg - Map scatter-gather list to prdt
8aa29f19
BVA
2075 * @hba: per adapter instance
2076 * @lrbp: pointer to local reference block
7a3e97b0
SY
2077 *
2078 * Returns 0 in case of success, non-zero value in case of failure
2079 */
75b1cc4a 2080static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0
SY
2081{
2082 struct ufshcd_sg_entry *prd_table;
2083 struct scatterlist *sg;
2084 struct scsi_cmnd *cmd;
2085 int sg_segments;
2086 int i;
2087
2088 cmd = lrbp->cmd;
2089 sg_segments = scsi_dma_map(cmd);
2090 if (sg_segments < 0)
2091 return sg_segments;
2092
2093 if (sg_segments) {
75b1cc4a
KK
2094 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2095 lrbp->utr_descriptor_ptr->prd_table_length =
2096 cpu_to_le16((u16)(sg_segments *
2097 sizeof(struct ufshcd_sg_entry)));
2098 else
2099 lrbp->utr_descriptor_ptr->prd_table_length =
2100 cpu_to_le16((u16) (sg_segments));
7a3e97b0
SY
2101
2102 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2103
2104 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2105 prd_table[i].size =
2106 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2107 prd_table[i].base_addr =
2108 cpu_to_le32(lower_32_bits(sg->dma_address));
2109 prd_table[i].upper_addr =
2110 cpu_to_le32(upper_32_bits(sg->dma_address));
52ac95fe 2111 prd_table[i].reserved = 0;
7a3e97b0
SY
2112 }
2113 } else {
2114 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2115 }
2116
2117 return 0;
2118}
2119
2120/**
2fbd009b 2121 * ufshcd_enable_intr - enable interrupts
7a3e97b0 2122 * @hba: per adapter instance
2fbd009b 2123 * @intrs: interrupt bits
7a3e97b0 2124 */
2fbd009b 2125static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
7a3e97b0 2126{
2fbd009b
SJ
2127 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2128
2129 if (hba->ufs_version == UFSHCI_VERSION_10) {
2130 u32 rw;
2131 rw = set & INTERRUPT_MASK_RW_VER_10;
2132 set = rw | ((set ^ intrs) & intrs);
2133 } else {
2134 set |= intrs;
2135 }
2136
2137 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2138}
2139
2140/**
2141 * ufshcd_disable_intr - disable interrupts
2142 * @hba: per adapter instance
2143 * @intrs: interrupt bits
2144 */
2145static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2146{
2147 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2148
2149 if (hba->ufs_version == UFSHCI_VERSION_10) {
2150 u32 rw;
2151 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2152 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2153 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2154
2155 } else {
2156 set &= ~intrs;
7a3e97b0 2157 }
2fbd009b
SJ
2158
2159 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
7a3e97b0
SY
2160}
2161
5a0b0cb9
SRT
2162/**
2163 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
2164 * descriptor according to the request
2165 * @lrbp: pointer to local reference block
2166 * @upiu_flags: flags required in the header
2167 * @cmd_dir: request's data direction
2168 */
2169static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
300bb13f 2170 u32 *upiu_flags, enum dma_data_direction cmd_dir)
5a0b0cb9
SRT
2171{
2172 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2173 u32 data_direction;
2174 u32 dword_0;
2175
2176 if (cmd_dir == DMA_FROM_DEVICE) {
2177 data_direction = UTP_DEVICE_TO_HOST;
2178 *upiu_flags = UPIU_CMD_FLAGS_READ;
2179 } else if (cmd_dir == DMA_TO_DEVICE) {
2180 data_direction = UTP_HOST_TO_DEVICE;
2181 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2182 } else {
2183 data_direction = UTP_NO_DATA_TRANSFER;
2184 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2185 }
2186
2187 dword_0 = data_direction | (lrbp->command_type
2188 << UPIU_COMMAND_TYPE_OFFSET);
2189 if (lrbp->intr_cmd)
2190 dword_0 |= UTP_REQ_DESC_INT_CMD;
2191
2192 /* Transfer request descriptor header fields */
2193 req_desc->header.dword_0 = cpu_to_le32(dword_0);
52ac95fe
YG
2194 /* dword_1 is reserved, hence it is set to 0 */
2195 req_desc->header.dword_1 = 0;
5a0b0cb9
SRT
2196 /*
2197 * assigning invalid value for command status. Controller
2198 * updates OCS on command completion, with the command
2199 * status
2200 */
2201 req_desc->header.dword_2 =
2202 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
52ac95fe
YG
2203 /* dword_3 is reserved, hence it is set to 0 */
2204 req_desc->header.dword_3 = 0;
51047266
YG
2205
2206 req_desc->prd_table_length = 0;
5a0b0cb9
SRT
2207}
2208
2209/**
2210 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2211 * for scsi commands
8aa29f19
BVA
2212 * @lrbp: local reference block pointer
2213 * @upiu_flags: flags
5a0b0cb9
SRT
2214 */
2215static
2216void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2217{
2218 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
52ac95fe 2219 unsigned short cdb_len;
5a0b0cb9
SRT
2220
2221 /* command descriptor fields */
2222 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2223 UPIU_TRANSACTION_COMMAND, upiu_flags,
2224 lrbp->lun, lrbp->task_tag);
2225 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2226 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2227
2228 /* Total EHS length and Data segment length will be zero */
2229 ucd_req_ptr->header.dword_2 = 0;
2230
2231 ucd_req_ptr->sc.exp_data_transfer_len =
2232 cpu_to_be32(lrbp->cmd->sdb.length);
2233
a851b2bd
AA
2234 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
2235 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
52ac95fe
YG
2236 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2237
2238 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2239}
2240
68078d5c
DR
2241/**
2242 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2243 * for query requests
2244 * @hba: UFS hba
2245 * @lrbp: local reference block pointer
2246 * @upiu_flags: flags
2247 */
2248static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2249 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2250{
2251 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2252 struct ufs_query *query = &hba->dev_cmd.query;
e8c8e82a 2253 u16 len = be16_to_cpu(query->request.upiu_req.length);
68078d5c
DR
2254
2255 /* Query request header */
2256 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2257 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2258 lrbp->lun, lrbp->task_tag);
2259 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2260 0, query->request.query_func, 0, 0);
2261
6861285c
ZL
2262 /* Data segment length is only needed for WRITE_DESC */
2263 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2264 ucd_req_ptr->header.dword_2 =
2265 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2266 else
2267 ucd_req_ptr->header.dword_2 = 0;
68078d5c
DR
2268
2269 /* Copy the Query Request buffer as is */
2270 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2271 QUERY_OSF_SIZE);
68078d5c
DR
2272
2273 /* Copy the Descriptor */
c6d4a831 2274 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
220d17a6 2275 memcpy(ucd_req_ptr + 1, query->descriptor, len);
c6d4a831 2276
51047266 2277 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
68078d5c
DR
2278}
2279
5a0b0cb9
SRT
2280static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2281{
2282 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2283
2284 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2285
2286 /* command descriptor fields */
2287 ucd_req_ptr->header.dword_0 =
2288 UPIU_HEADER_DWORD(
2289 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
51047266
YG
2290 /* clear rest of the fields of basic header */
2291 ucd_req_ptr->header.dword_1 = 0;
2292 ucd_req_ptr->header.dword_2 = 0;
2293
2294 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5a0b0cb9
SRT
2295}
2296
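/*
 * Worked example of the header packing used above: the first argument of
 * UPIU_HEADER_DWORD() ends up as the first byte of the header in memory,
 * so for a NOP OUT with an illustrative task tag of 5
 *   dword_0 = UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 5)
 *           = cpu_to_be32(0x00000005)
 * i.e. transaction type 0x00 first, then flags, LUN and task tag 0x05.
 */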
7a3e97b0 2297/**
300bb13f
JP
2298 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
2299 * for Device Management Purposes
8aa29f19
BVA
2300 * @hba: per adapter instance
2301 * @lrbp: pointer to local reference block
7a3e97b0 2302 */
300bb13f 2303static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
7a3e97b0 2304{
7a3e97b0 2305 u32 upiu_flags;
5a0b0cb9 2306 int ret = 0;
7a3e97b0 2307
83dc7e3d 2308 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2309 (hba->ufs_version == UFSHCI_VERSION_11))
300bb13f 2310 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
83dc7e3d 2311 else
2312 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2313
2314 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2315 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2316 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2317 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2318 ufshcd_prepare_utp_nop_upiu(lrbp);
2319 else
2320 ret = -EINVAL;
2321
2322 return ret;
2323}
2324
2325/**
2326 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2327 * for SCSI Purposes
8aa29f19
BVA
2328 * @hba: per adapter instance
2329 * @lrbp: pointer to local reference block
300bb13f
JP
2330 */
2331static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2332{
2333 u32 upiu_flags;
2334 int ret = 0;
2335
83dc7e3d 2336 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2337 (hba->ufs_version == UFSHCI_VERSION_11))
300bb13f 2338 lrbp->command_type = UTP_CMD_TYPE_SCSI;
83dc7e3d 2339 else
2340 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
300bb13f
JP
2341
2342 if (likely(lrbp->cmd)) {
2343 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2344 lrbp->cmd->sc_data_direction);
2345 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2346 } else {
2347 ret = -EINVAL;
2348 }
5a0b0cb9
SRT
2349
2350 return ret;
7a3e97b0
SY
2351}
2352
2a8fa600
SJ
2353/**
2354 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
8aa29f19 2355 * @upiu_wlun_id: UPIU W-LUN id
2a8fa600
SJ
2356 *
2357 * Returns SCSI W-LUN id
2358 */
2359static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2360{
2361 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2362}
2363
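/*
 * Worked example: the RPMB well known LU uses UPIU W-LUN id 0xc4
 * (UFS_UPIU_WLUN_ID | 0x44), so
 *   ufshcd_upiu_wlun_to_scsi_wlun(0xc4) = (0xc4 & ~0x80) | SCSI_W_LUN_BASE
 *                                       = 0x44 | 0xc100 = 0xc144,
 * which is the SCSI W-LUN the RPMB unit is exposed under.
 */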
7a3e97b0
SY
2364/**
2365 * ufshcd_queuecommand - main entry point for SCSI requests
8aa29f19 2366 * @host: SCSI host pointer
7a3e97b0 2367 * @cmd: command from SCSI Midlayer
7a3e97b0
SY
2368 *
2369 * Returns 0 for success, non-zero in case of failure
2370 */
2371static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2372{
2373 struct ufshcd_lrb *lrbp;
2374 struct ufs_hba *hba;
2375 unsigned long flags;
2376 int tag;
2377 int err = 0;
2378
2379 hba = shost_priv(host);
2380
2381 tag = cmd->request->tag;
14497328
YG
2382 if (!ufshcd_valid_tag(hba, tag)) {
2383 dev_err(hba->dev,
2384 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2385 __func__, tag, cmd, cmd->request);
2386 BUG();
2387 }
7a3e97b0 2388
a3cd5ec5
SJ
2389 if (!down_read_trylock(&hba->clk_scaling_lock))
2390 return SCSI_MLQUEUE_HOST_BUSY;
2391
3441da7d
SRT
2392 spin_lock_irqsave(hba->host->host_lock, flags);
2393 switch (hba->ufshcd_state) {
2394 case UFSHCD_STATE_OPERATIONAL:
2395 break;
141f8165 2396 case UFSHCD_STATE_EH_SCHEDULED:
3441da7d 2397 case UFSHCD_STATE_RESET:
7a3e97b0 2398 err = SCSI_MLQUEUE_HOST_BUSY;
3441da7d
SRT
2399 goto out_unlock;
2400 case UFSHCD_STATE_ERROR:
2401 set_host_byte(cmd, DID_ERROR);
2402 cmd->scsi_done(cmd);
2403 goto out_unlock;
2404 default:
2405 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2406 __func__, hba->ufshcd_state);
2407 set_host_byte(cmd, DID_BAD_TARGET);
2408 cmd->scsi_done(cmd);
2409 goto out_unlock;
7a3e97b0 2410 }
53c12d0e
YG
2411
2412 /* if error handling is in progress, don't issue commands */
2413 if (ufshcd_eh_in_progress(hba)) {
2414 set_host_byte(cmd, DID_ERROR);
2415 cmd->scsi_done(cmd);
2416 goto out_unlock;
2417 }
3441da7d 2418 spin_unlock_irqrestore(hba->host->host_lock, flags);
7a3e97b0 2419
7fabb77b
GB
2420 hba->req_abort_count = 0;
2421
1ab27c9c
ST
2422 err = ufshcd_hold(hba, true);
2423 if (err) {
2424 err = SCSI_MLQUEUE_HOST_BUSY;
1ab27c9c
ST
2425 goto out;
2426 }
2427 WARN_ON(hba->clk_gating.state != CLKS_ON);
2428
7a3e97b0
SY
2429 lrbp = &hba->lrb[tag];
2430
5a0b0cb9 2431 WARN_ON(lrbp->cmd);
7a3e97b0 2432 lrbp->cmd = cmd;
09a5a24f 2433 lrbp->sense_bufflen = UFS_SENSE_SIZE;
7a3e97b0
SY
2434 lrbp->sense_buffer = cmd->sense_buffer;
2435 lrbp->task_tag = tag;
0ce147d4 2436 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
b852190e 2437 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
e0b299e3 2438 lrbp->req_abort_skip = false;
7a3e97b0 2439
300bb13f
JP
2440 ufshcd_comp_scsi_upiu(hba, lrbp);
2441
75b1cc4a 2442 err = ufshcd_map_sg(hba, lrbp);
5a0b0cb9
SRT
2443 if (err) {
2444 lrbp->cmd = NULL;
17c7d35f 2445 ufshcd_release(hba);
7a3e97b0 2446 goto out;
5a0b0cb9 2447 }
ad1a1b9c
GB
2448 /* Make sure descriptors are ready before ringing the doorbell */
2449 wmb();
7a3e97b0
SY
2450
2451 /* issue command to the controller */
2452 spin_lock_irqsave(hba->host->host_lock, flags);
0e675efa 2453 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
7a3e97b0 2454 ufshcd_send_command(hba, tag);
3441da7d 2455out_unlock:
7a3e97b0
SY
2456 spin_unlock_irqrestore(hba->host->host_lock, flags);
2457out:
a3cd5ec5 2458 up_read(&hba->clk_scaling_lock);
7a3e97b0
SY
2459 return err;
2460}
2461
5a0b0cb9
SRT
2462static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2463 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2464{
2465 lrbp->cmd = NULL;
2466 lrbp->sense_bufflen = 0;
2467 lrbp->sense_buffer = NULL;
2468 lrbp->task_tag = tag;
2469 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
5a0b0cb9
SRT
2470 lrbp->intr_cmd = true; /* No interrupt aggregation */
2471 hba->dev_cmd.type = cmd_type;
2472
300bb13f 2473 return ufshcd_comp_devman_upiu(hba, lrbp);
5a0b0cb9
SRT
2474}
2475
2476static int
2477ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2478{
2479 int err = 0;
2480 unsigned long flags;
2481 u32 mask = 1 << tag;
2482
2483 /* clear outstanding transaction before retry */
2484 spin_lock_irqsave(hba->host->host_lock, flags);
2485 ufshcd_utrl_clear(hba, tag);
2486 spin_unlock_irqrestore(hba->host->host_lock, flags);
2487
2488 /*
2489 * wait for h/w to clear the corresponding bit in the door-bell.
2490 * max. wait is 1 sec.
2491 */
2492 err = ufshcd_wait_for_register(hba,
2493 REG_UTP_TRANSFER_REQ_DOOR_BELL,
596585a2 2494 mask, ~mask, 1000, 1000, true);
5a0b0cb9
SRT
2495
2496 return err;
2497}
2498
c6d4a831
DR
2499static int
2500ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2501{
2502 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2503
2504 /* Get the UPIU response */
2505 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2506 UPIU_RSP_CODE_OFFSET;
2507 return query_res->response;
2508}
2509
5a0b0cb9
SRT
2510/**
2511 * ufshcd_dev_cmd_completion() - handles device management command responses
2512 * @hba: per adapter instance
2513 * @lrbp: pointer to local reference block
2514 */
2515static int
2516ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2517{
2518 int resp;
2519 int err = 0;
2520
ff8e20c6 2521 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
2522 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2523
2524 switch (resp) {
2525 case UPIU_TRANSACTION_NOP_IN:
2526 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2527 err = -EINVAL;
2528 dev_err(hba->dev, "%s: unexpected response %x\n",
2529 __func__, resp);
2530 }
2531 break;
68078d5c 2532 case UPIU_TRANSACTION_QUERY_RSP:
c6d4a831
DR
2533 err = ufshcd_check_query_response(hba, lrbp);
2534 if (!err)
2535 err = ufshcd_copy_query_response(hba, lrbp);
68078d5c 2536 break;
5a0b0cb9
SRT
2537 case UPIU_TRANSACTION_REJECT_UPIU:
2538 /* TODO: handle Reject UPIU Response */
2539 err = -EPERM;
2540 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2541 __func__);
2542 break;
2543 default:
2544 err = -EINVAL;
2545 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2546 __func__, resp);
2547 break;
2548 }
2549
2550 return err;
2551}
2552
2553static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2554 struct ufshcd_lrb *lrbp, int max_timeout)
2555{
2556 int err = 0;
2557 unsigned long time_left;
2558 unsigned long flags;
2559
2560 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2561 msecs_to_jiffies(max_timeout));
2562
ad1a1b9c
GB
2563 /* Make sure descriptors are ready before ringing the doorbell */
2564 wmb();
5a0b0cb9
SRT
2565 spin_lock_irqsave(hba->host->host_lock, flags);
2566 hba->dev_cmd.complete = NULL;
2567 if (likely(time_left)) {
2568 err = ufshcd_get_tr_ocs(lrbp);
2569 if (!err)
2570 err = ufshcd_dev_cmd_completion(hba, lrbp);
2571 }
2572 spin_unlock_irqrestore(hba->host->host_lock, flags);
2573
2574 if (!time_left) {
2575 err = -ETIMEDOUT;
a48353f6
YG
2576 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2577 __func__, lrbp->task_tag);
5a0b0cb9 2578 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
a48353f6 2579 /* successfully cleared the command, retry if needed */
5a0b0cb9 2580 err = -EAGAIN;
a48353f6
YG
2581 /*
2582 * in case of an error, after clearing the doorbell,
2583 * we also need to clear the outstanding_request
2584 * field in hba
2585 */
2586 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
5a0b0cb9
SRT
2587 }
2588
2589 return err;
2590}
2591
5a0b0cb9
SRT
2592/**
2593 * ufshcd_exec_dev_cmd - API for sending device management requests
8aa29f19
BVA
2594 * @hba: UFS hba
2595 * @cmd_type: specifies the type (NOP, Query...)
2596 * @timeout: timeout in milliseconds
5a0b0cb9 2597 *
68078d5c
DR
2598 * NOTE: Since there is only one available tag for device management commands,
2599 * it is expected you hold the hba->dev_cmd.lock mutex.
5a0b0cb9
SRT
2600 */
2601static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2602 enum dev_cmd_type cmd_type, int timeout)
2603{
7252a360
BVA
2604 struct request_queue *q = hba->cmd_queue;
2605 struct request *req;
5a0b0cb9
SRT
2606 struct ufshcd_lrb *lrbp;
2607 int err;
2608 int tag;
2609 struct completion wait;
2610 unsigned long flags;
2611
a3cd5ec5
SJ
2612 down_read(&hba->clk_scaling_lock);
2613
5a0b0cb9
SRT
2614 /*
2615 * Get free slot, sleep if slots are unavailable.
2616 * Even though we use wait_event() which sleeps indefinitely,
2617 * the maximum wait time is bounded by SCSI request timeout.
2618 */
7252a360 2619 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
bb14dd15
DC
2620 if (IS_ERR(req)) {
2621 err = PTR_ERR(req);
2622 goto out_unlock;
2623 }
7252a360
BVA
2624 tag = req->tag;
2625 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5a0b0cb9
SRT
2626
2627 init_completion(&wait);
2628 lrbp = &hba->lrb[tag];
2629 WARN_ON(lrbp->cmd);
2630 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2631 if (unlikely(err))
2632 goto out_put_tag;
2633
2634 hba->dev_cmd.complete = &wait;
2635
6667e6d9 2636 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
e3dfdc53
YG
2637 /* Make sure descriptors are ready before ringing the doorbell */
2638 wmb();
5a0b0cb9 2639 spin_lock_irqsave(hba->host->host_lock, flags);
0e675efa 2640 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
5a0b0cb9
SRT
2641 ufshcd_send_command(hba, tag);
2642 spin_unlock_irqrestore(hba->host->host_lock, flags);
2643
2644 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2645
6667e6d9
OS
2646 ufshcd_add_query_upiu_trace(hba, tag,
2647 err ? "query_complete_err" : "query_complete");
2648
5a0b0cb9 2649out_put_tag:
7252a360 2650 blk_put_request(req);
bb14dd15 2651out_unlock:
a3cd5ec5 2652 up_read(&hba->clk_scaling_lock);
5a0b0cb9
SRT
2653 return err;
2654}
2655
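/*
 * A minimal sketch of how a device management command is issued through the
 * helper above: the caller serializes on dev_cmd.lock (see the NOTE in the
 * kernel-doc), picks the command type and timeout, and retries on transient
 * failures. The function name and retry count are illustrative assumptions.
 */
static int example_send_nop_out(struct ufs_hba *hba)
{
	int err, retries;

	mutex_lock(&hba->dev_cmd.lock);
	for (retries = 3; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&hba->dev_cmd.lock);

	return err;
}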
d44a5f98
DR
2656/**
2657 * ufshcd_init_query() - init the query response and request parameters
2658 * @hba: per-adapter instance
2659 * @request: address of the request pointer to be initialized
2660 * @response: address of the response pointer to be initialized
2661 * @opcode: operation to perform
2662 * @idn: flag idn to access
2663 * @index: LU number to access
2664 * @selector: query/flag/descriptor further identification
2665 */
2666static inline void ufshcd_init_query(struct ufs_hba *hba,
2667 struct ufs_query_req **request, struct ufs_query_res **response,
2668 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2669{
2670 *request = &hba->dev_cmd.query.request;
2671 *response = &hba->dev_cmd.query.response;
2672 memset(*request, 0, sizeof(struct ufs_query_req));
2673 memset(*response, 0, sizeof(struct ufs_query_res));
2674 (*request)->upiu_req.opcode = opcode;
2675 (*request)->upiu_req.idn = idn;
2676 (*request)->upiu_req.index = index;
2677 (*request)->upiu_req.selector = selector;
2678}
2679
dc3c8d3a
YG
2680static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2681 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2682{
2683 int ret;
2684 int retries;
2685
2686 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2687 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2688 if (ret)
2689 dev_dbg(hba->dev,
2690 "%s: failed with error %d, retries %d\n",
2691 __func__, ret, retries);
2692 else
2693 break;
2694 }
2695
2696 if (ret)
2697 dev_err(hba->dev,
2698 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2699 __func__, opcode, idn, ret, retries);
2700 return ret;
2701}
2702
68078d5c
DR
2703/**
2704 * ufshcd_query_flag() - API function for sending flag query requests
8aa29f19
BVA
2705 * @hba: per-adapter instance
2706 * @opcode: flag query to perform
2707 * @idn: flag idn to access
2708 * @flag_res: the flag value after the query request completes
68078d5c
DR
2709 *
2710 * Returns 0 for success, non-zero in case of failure
2711 */
dc3c8d3a 2712int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
68078d5c
DR
2713 enum flag_idn idn, bool *flag_res)
2714{
d44a5f98
DR
2715 struct ufs_query_req *request = NULL;
2716 struct ufs_query_res *response = NULL;
2717 int err, index = 0, selector = 0;
e5ad406c 2718 int timeout = QUERY_REQ_TIMEOUT;
68078d5c
DR
2719
2720 BUG_ON(!hba);
2721
1ab27c9c 2722 ufshcd_hold(hba, false);
68078d5c 2723 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
2724 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2725 selector);
68078d5c
DR
2726
2727 switch (opcode) {
2728 case UPIU_QUERY_OPCODE_SET_FLAG:
2729 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2730 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2731 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2732 break;
2733 case UPIU_QUERY_OPCODE_READ_FLAG:
2734 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2735 if (!flag_res) {
2736 /* No dummy reads */
2737 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2738 __func__);
2739 err = -EINVAL;
2740 goto out_unlock;
2741 }
2742 break;
2743 default:
2744 dev_err(hba->dev,
2745 "%s: Expected query flag opcode but got = %d\n",
2746 __func__, opcode);
2747 err = -EINVAL;
2748 goto out_unlock;
2749 }
68078d5c 2750
e5ad406c 2751 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
68078d5c
DR
2752
2753 if (err) {
2754 dev_err(hba->dev,
2755 "%s: Sending flag query for idn %d failed, err = %d\n",
2756 __func__, idn, err);
2757 goto out_unlock;
2758 }
2759
2760 if (flag_res)
e8c8e82a 2761 *flag_res = (be32_to_cpu(response->upiu_res.value) &
68078d5c
DR
2762 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2763
2764out_unlock:
2765 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 2766 ufshcd_release(hba);
68078d5c
DR
2767 return err;
2768}
2769
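/*
 * A minimal sketch of the flag query API in use: device initialization sets
 * the fDeviceInit flag and then reads it back until the device clears it.
 * The function name is an illustrative assumption; only a single read-back
 * is shown here.
 */
static int example_trigger_fdeviceinit(struct ufs_hba *hba, bool *still_busy)
{
	int err;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err)
		return err;

	/* the device clears fDeviceInit once its initialization is done */
	return ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				       QUERY_FLAG_IDN_FDEVICEINIT, still_busy);
}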
66ec6d59
SRT
2770/**
2771 * ufshcd_query_attr - API function for sending attribute requests
8aa29f19
BVA
2772 * @hba: per-adapter instance
2773 * @opcode: attribute opcode
2774 * @idn: attribute idn to access
2775 * @index: index field
2776 * @selector: selector field
2777 * @attr_val: the attribute value after the query request completes
66ec6d59
SRT
2778 *
2779 * Returns 0 for success, non-zero in case of failure
2780*/
ec92b59c
SN
2781int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2782 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
66ec6d59 2783{
d44a5f98
DR
2784 struct ufs_query_req *request = NULL;
2785 struct ufs_query_res *response = NULL;
66ec6d59
SRT
2786 int err;
2787
2788 BUG_ON(!hba);
2789
1ab27c9c 2790 ufshcd_hold(hba, false);
66ec6d59
SRT
2791 if (!attr_val) {
2792 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2793 __func__, opcode);
2794 err = -EINVAL;
2795 goto out;
2796 }
2797
2798 mutex_lock(&hba->dev_cmd.lock);
d44a5f98
DR
2799 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2800 selector);
66ec6d59
SRT
2801
2802 switch (opcode) {
2803 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2804 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
e8c8e82a 2805 request->upiu_req.value = cpu_to_be32(*attr_val);
66ec6d59
SRT
2806 break;
2807 case UPIU_QUERY_OPCODE_READ_ATTR:
2808 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2809 break;
2810 default:
2811 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2812 __func__, opcode);
2813 err = -EINVAL;
2814 goto out_unlock;
2815 }
2816
d44a5f98 2817 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
66ec6d59
SRT
2818
2819 if (err) {
4b761b58
YG
2820 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2821 __func__, opcode, idn, index, err);
66ec6d59
SRT
2822 goto out_unlock;
2823 }
2824
e8c8e82a 2825 *attr_val = be32_to_cpu(response->upiu_res.value);
66ec6d59
SRT
2826
2827out_unlock:
2828 mutex_unlock(&hba->dev_cmd.lock);
2829out:
1ab27c9c 2830 ufshcd_release(hba);
66ec6d59
SRT
2831 return err;
2832}
2833
5e86ae44
YG
2834/**
2835 * ufshcd_query_attr_retry() - API function for sending query
2836 * attribute with retries
2837 * @hba: per-adapter instance
2838 * @opcode: attribute opcode
2839 * @idn: attribute idn to access
2840 * @index: index field
2841 * @selector: selector field
2842 * @attr_val: the attribute value after the query request
2843 * completes
2844 *
2845 * Returns 0 for success, non-zero in case of failure
2846*/
2847static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2848 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2849 u32 *attr_val)
2850{
2851 int ret = 0;
2852 u32 retries;
2853
2854 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2855 ret = ufshcd_query_attr(hba, opcode, idn, index,
2856 selector, attr_val);
2857 if (ret)
2858 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2859 __func__, ret, retries);
2860 else
2861 break;
2862 }
2863
2864 if (ret)
2865 dev_err(hba->dev,
2866 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2867 __func__, idn, ret, QUERY_REQ_RETRIES);
2868 return ret;
2869}
2870
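/*
 * A minimal sketch of reading a device attribute with the retrying wrapper
 * above; here the bActiveICCLevel attribute is read with index and selector
 * both zero. The function name is an illustrative assumption.
 */
static int example_read_active_icc_level(struct ufs_hba *hba, u32 *icc_level)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_ACTIVE_ICC_LVL,
				       0, 0, icc_level);
}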
a70e91b8 2871static int __ufshcd_query_descriptor(struct ufs_hba *hba,
d44a5f98
DR
2872 enum query_opcode opcode, enum desc_idn idn, u8 index,
2873 u8 selector, u8 *desc_buf, int *buf_len)
2874{
2875 struct ufs_query_req *request = NULL;
2876 struct ufs_query_res *response = NULL;
2877 int err;
2878
2879 BUG_ON(!hba);
2880
1ab27c9c 2881 ufshcd_hold(hba, false);
d44a5f98
DR
2882 if (!desc_buf) {
2883 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2884 __func__, opcode);
2885 err = -EINVAL;
2886 goto out;
2887 }
2888
a4b0e8a4 2889 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
d44a5f98
DR
2890 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2891 __func__, *buf_len);
2892 err = -EINVAL;
2893 goto out;
2894 }
2895
2896 mutex_lock(&hba->dev_cmd.lock);
2897 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2898 selector);
2899 hba->dev_cmd.query.descriptor = desc_buf;
ea2aab24 2900 request->upiu_req.length = cpu_to_be16(*buf_len);
d44a5f98
DR
2901
2902 switch (opcode) {
2903 case UPIU_QUERY_OPCODE_WRITE_DESC:
2904 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2905 break;
2906 case UPIU_QUERY_OPCODE_READ_DESC:
2907 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2908 break;
2909 default:
2910 dev_err(hba->dev,
2911 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2912 __func__, opcode);
2913 err = -EINVAL;
2914 goto out_unlock;
2915 }
2916
2917 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2918
2919 if (err) {
4b761b58
YG
2920 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2921 __func__, opcode, idn, index, err);
d44a5f98
DR
2922 goto out_unlock;
2923 }
2924
ea2aab24 2925 *buf_len = be16_to_cpu(response->upiu_res.length);
d44a5f98
DR
2926
2927out_unlock:
cfcbae38 2928 hba->dev_cmd.query.descriptor = NULL;
d44a5f98
DR
2929 mutex_unlock(&hba->dev_cmd.lock);
2930out:
1ab27c9c 2931 ufshcd_release(hba);
d44a5f98
DR
2932 return err;
2933}
2934
a70e91b8 2935/**
8aa29f19
BVA
2936 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
2937 * @hba: per-adapter instance
2938 * @opcode: attribute opcode
2939 * @idn: attribute idn to access
2940 * @index: index field
2941 * @selector: selector field
2942 * @desc_buf: the buffer that contains the descriptor
2943 * @buf_len: length parameter passed to the device
a70e91b8
YG
2944 *
2945 * Returns 0 for success, non-zero in case of failure.
2946 * The buf_len parameter will contain, on return, the length parameter
2947 * received on the response.
2948 */
2238d31c
SN
2949int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2950 enum query_opcode opcode,
2951 enum desc_idn idn, u8 index,
2952 u8 selector,
2953 u8 *desc_buf, int *buf_len)
a70e91b8
YG
2954{
2955 int err;
2956 int retries;
2957
2958 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2959 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2960 selector, desc_buf, buf_len);
2961 if (!err || err == -EINVAL)
2962 break;
2963 }
2964
2965 return err;
2966}
a70e91b8 2967
a4b0e8a4
PM
2968/**
2969 * ufshcd_read_desc_length - read the specified descriptor length from header
2970 * @hba: Pointer to adapter instance
2971 * @desc_id: descriptor idn value
2972 * @desc_index: descriptor index
2973 * @desc_length: pointer to variable to read the length of descriptor
2974 *
2975 * Return 0 in case of success, non-zero otherwise
2976 */
2977static int ufshcd_read_desc_length(struct ufs_hba *hba,
2978 enum desc_idn desc_id,
2979 int desc_index,
2980 int *desc_length)
2981{
2982 int ret;
2983 u8 header[QUERY_DESC_HDR_SIZE];
2984 int header_len = QUERY_DESC_HDR_SIZE;
2985
2986 if (desc_id >= QUERY_DESC_IDN_MAX)
2987 return -EINVAL;
2988
2989 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2990 desc_id, desc_index, 0, header,
2991 &header_len);
2992
2993 if (ret) {
2994 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2995 __func__, desc_id);
2996 return ret;
2997 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2998 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2999 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3000 desc_id);
3001 ret = -EINVAL;
3002 }
3003
3004 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3005 return ret;
3006
3007}
3008
3009/**
3010 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3011 * @hba: Pointer to adapter instance
3012 * @desc_id: descriptor idn value
3013 * @desc_len: mapped desc length (out)
3014 *
3015 * Return 0 in case of success, non-zero otherwise
3016 */
3017int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3018 enum desc_idn desc_id, int *desc_len)
3019{
3020 switch (desc_id) {
3021 case QUERY_DESC_IDN_DEVICE:
3022 *desc_len = hba->desc_size.dev_desc;
3023 break;
3024 case QUERY_DESC_IDN_POWER:
3025 *desc_len = hba->desc_size.pwr_desc;
3026 break;
3027 case QUERY_DESC_IDN_GEOMETRY:
3028 *desc_len = hba->desc_size.geom_desc;
3029 break;
3030 case QUERY_DESC_IDN_CONFIGURATION:
3031 *desc_len = hba->desc_size.conf_desc;
3032 break;
3033 case QUERY_DESC_IDN_UNIT:
3034 *desc_len = hba->desc_size.unit_desc;
3035 break;
3036 case QUERY_DESC_IDN_INTERCONNECT:
3037 *desc_len = hba->desc_size.interc_desc;
3038 break;
3039 case QUERY_DESC_IDN_STRING:
3040 *desc_len = QUERY_DESC_MAX_SIZE;
3041 break;
c648c2d2
SN
3042 case QUERY_DESC_IDN_HEALTH:
3043 *desc_len = hba->desc_size.hlth_desc;
3044 break;
a4b0e8a4
PM
3045 case QUERY_DESC_IDN_RFU_0:
3046 case QUERY_DESC_IDN_RFU_1:
3047 *desc_len = 0;
3048 break;
3049 default:
3050 *desc_len = 0;
3051 return -EINVAL;
3052 }
3053 return 0;
3054}
3055EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3056
da461cec
SJ
3057/**
3058 * ufshcd_read_desc_param - read the specified descriptor parameter
3059 * @hba: Pointer to adapter instance
3060 * @desc_id: descriptor idn value
3061 * @desc_index: descriptor index
3062 * @param_offset: offset of the parameter to read
3063 * @param_read_buf: pointer to buffer where parameter would be read
3064 * @param_size: sizeof(param_read_buf)
3065 *
3066 * Return 0 in case of success, non-zero otherwise
3067 */
45bced87
SN
3068int ufshcd_read_desc_param(struct ufs_hba *hba,
3069 enum desc_idn desc_id,
3070 int desc_index,
3071 u8 param_offset,
3072 u8 *param_read_buf,
3073 u8 param_size)
da461cec
SJ
3074{
3075 int ret;
3076 u8 *desc_buf;
a4b0e8a4 3077 int buff_len;
da461cec
SJ
3078 bool is_kmalloc = true;
3079
a4b0e8a4
PM
3080 /* Safety check */
3081 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
da461cec
SJ
3082 return -EINVAL;
3083
a4b0e8a4
PM
3084 /* Get the max length of descriptor from structure filled up at probe
3085 * time.
3086 */
3087 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
da461cec 3088
a4b0e8a4
PM
3089 /* Sanity checks */
3090 if (ret || !buff_len) {
3091 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3092 __func__);
3093 return ret;
3094 }
3095
3096 /* Check whether we need temp memory */
3097 if (param_offset != 0 || param_size < buff_len) {
da461cec
SJ
3098 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3099 if (!desc_buf)
3100 return -ENOMEM;
a4b0e8a4
PM
3101 } else {
3102 desc_buf = param_read_buf;
3103 is_kmalloc = false;
da461cec
SJ
3104 }
3105
a4b0e8a4 3106 /* Request for full descriptor */
a70e91b8 3107 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
a4b0e8a4
PM
3108 desc_id, desc_index, 0,
3109 desc_buf, &buff_len);
da461cec 3110
bde44bb6
SJ
3111 if (ret) {
3112 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3113 __func__, desc_id, desc_index, param_offset, ret);
da461cec
SJ
3114 goto out;
3115 }
3116
bde44bb6
SJ
3117 /* Sanity check */
3118 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3119 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3120 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3121 ret = -EINVAL;
3122 goto out;
3123 }
3124
a4b0e8a4
PM
3125 /* Check whether we will not copy more data than available */
3126 if (is_kmalloc && param_size > buff_len)
3127 param_size = buff_len;
bde44bb6 3128
da461cec
SJ
3129 if (is_kmalloc)
3130 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3131out:
3132 if (is_kmalloc)
3133 kfree(desc_buf);
3134 return ret;
3135}
3136
3137static inline int ufshcd_read_desc(struct ufs_hba *hba,
3138 enum desc_idn desc_id,
3139 int desc_index,
4b828fe1 3140 void *buf,
da461cec
SJ
3141 u32 size)
3142{
3143 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3144}
3145
3146static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3147 u8 *buf,
3148 u32 size)
3149{
dbd34a61 3150 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
da461cec
SJ
3151}
3152
8209b6d5 3153static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
b573d484
YG
3154{
3155 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3156}
b573d484 3157
4b828fe1
TW
3158/**
3159 * struct uc_string_id - unicode string
3160 *
3161 * @len: size of this descriptor inclusive
3162 * @type: descriptor type
3163 * @uc: unicode string character
3164 */
3165struct uc_string_id {
3166 u8 len;
3167 u8 type;
3168 wchar_t uc[0];
3169} __packed;
3170
3171/* replace non-printable or non-ASCII characters with spaces */
3172static inline char ufshcd_remove_non_printable(u8 ch)
3173{
3174 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3175}
3176
b573d484
YG
3177/**
3178 * ufshcd_read_string_desc - read string descriptor
3179 * @hba: pointer to adapter instance
3180 * @desc_index: descriptor index
4b828fe1
TW
3181 * @buf: pointer to buffer where descriptor would be read,
3182 * the caller should free the memory.
b573d484 3183 * @ascii: if true convert from unicode to ascii characters
4b828fe1 3184 * null terminated string.
b573d484 3185 *
4b828fe1
TW
3186 * Return:
3187 * * string size on success.
3188 * * -ENOMEM: on allocation failure
3189 * * -EINVAL: on a wrong parameter
b573d484 3190 */
4b828fe1
TW
3191int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3192 u8 **buf, bool ascii)
b573d484 3193{
4b828fe1
TW
3194 struct uc_string_id *uc_str;
3195 u8 *str;
3196 int ret;
b573d484 3197
4b828fe1
TW
3198 if (!buf)
3199 return -EINVAL;
b573d484 3200
4b828fe1
TW
3201 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3202 if (!uc_str)
3203 return -ENOMEM;
b573d484 3204
4b828fe1
TW
3205 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3206 desc_index, uc_str,
3207 QUERY_DESC_MAX_SIZE);
3208 if (ret < 0) {
3209 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3210 QUERY_REQ_RETRIES, ret);
3211 str = NULL;
3212 goto out;
3213 }
3214
3215 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3216 dev_dbg(hba->dev, "String Desc is of zero length\n");
3217 str = NULL;
3218 ret = 0;
b573d484
YG
3219 goto out;
3220 }
3221
3222 if (ascii) {
4b828fe1 3223 ssize_t ascii_len;
b573d484 3224 int i;
b573d484 3225 /* remove header and divide by 2 to move from UTF16 to UTF8 */
4b828fe1
TW
3226 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3227 str = kzalloc(ascii_len, GFP_KERNEL);
3228 if (!str) {
3229 ret = -ENOMEM;
fcbefc3b 3230 goto out;
b573d484
YG
3231 }
3232
3233 /*
3234 * the descriptor contains the string in UTF-16 format;
3235 * we need to convert it to UTF-8 so it can be displayed
3236 */
4b828fe1
TW
3237 ret = utf16s_to_utf8s(uc_str->uc,
3238 uc_str->len - QUERY_DESC_HDR_SIZE,
3239 UTF16_BIG_ENDIAN, str, ascii_len);
b573d484
YG
3240
3241 /* replace non-printable or non-ASCII characters with spaces */
4b828fe1
TW
3242 for (i = 0; i < ret; i++)
3243 str[i] = ufshcd_remove_non_printable(str[i]);
b573d484 3244
4b828fe1
TW
3245 str[ret++] = '\0';
3246
3247 } else {
5f57704d 3248 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
4b828fe1
TW
3249 if (!str) {
3250 ret = -ENOMEM;
3251 goto out;
3252 }
4b828fe1 3253 ret = uc_str->len;
b573d484
YG
3254 }
3255out:
4b828fe1
TW
3256 *buf = str;
3257 kfree(uc_str);
3258 return ret;
b573d484 3259}
b573d484 3260
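/*
 * A minimal sketch of reading a string descriptor in ASCII form: the index
 * normally comes out of the device descriptor (e.g. the product name field),
 * and the buffer returned through @buf must be freed by the caller. The
 * function name and the way the index is obtained are illustrative
 * assumptions.
 */
static void example_log_product_name(struct ufs_hba *hba, u8 model_index)
{
	u8 *model = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
	if (ret > 0)
		dev_info(hba->dev, "product name: %s\n", model);

	kfree(model);
}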
da461cec
SJ
3261/**
3262 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3263 * @hba: Pointer to adapter instance
3264 * @lun: lun id
3265 * @param_offset: offset of the parameter to read
3266 * @param_read_buf: pointer to buffer where parameter would be read
3267 * @param_size: sizeof(param_read_buf)
3268 *
3269 * Return 0 in case of success, non-zero otherwise
3270 */
3271static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3272 int lun,
3273 enum unit_desc_param param_offset,
3274 u8 *param_read_buf,
3275 u32 param_size)
3276{
3277 /*
3278 * Unit descriptors are only available for general purpose LUs (LUN id
3279 * from 0 to 7) and RPMB Well known LU.
3280 */
d829fc8a 3281 if (!ufs_is_valid_unit_desc_lun(lun))
da461cec
SJ
3282 return -EOPNOTSUPP;
3283
3284 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3285 param_offset, param_read_buf, param_size);
3286}
3287
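/*
 * A minimal sketch of reading a single unit descriptor field: the logical
 * unit's queue depth is a one-byte parameter, so a single-byte read through
 * the helper above is sufficient. The function name is an illustrative
 * assumption.
 */
static int example_read_lu_queue_depth(struct ufs_hba *hba, int lun, u8 *qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
					   UNIT_DESC_PARAM_LU_Q_DEPTH,
					   qdepth, sizeof(*qdepth));
}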
7a3e97b0
SY
3288/**
3289 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3290 * @hba: per adapter instance
3291 *
3292 * 1. Allocate DMA memory for Command Descriptor array
3293 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3294 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3295 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3296 * (UTMRDL)
3297 * 4. Allocate memory for local reference block(lrb).
3298 *
3299 * Returns 0 for success, non-zero in case of failure
3300 */
3301static int ufshcd_memory_alloc(struct ufs_hba *hba)
3302{
3303 size_t utmrdl_size, utrdl_size, ucdl_size;
3304
3305 /* Allocate memory for UTP command descriptors */
3306 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2953f850
SJ
3307 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3308 ucdl_size,
3309 &hba->ucdl_dma_addr,
3310 GFP_KERNEL);
7a3e97b0
SY
3311
3312 /*
3313 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3314 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3315 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3316 * be aligned to 128 bytes as well
3317 */
3318 if (!hba->ucdl_base_addr ||
3319 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3320 dev_err(hba->dev,
7a3e97b0
SY
3321 "Command Descriptor Memory allocation failed\n");
3322 goto out;
3323 }
3324
3325 /*
3326 * Allocate memory for UTP Transfer descriptors
3327 * UFSHCI requires 1024 byte alignment of UTRD
3328 */
3329 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2953f850
SJ
3330 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3331 utrdl_size,
3332 &hba->utrdl_dma_addr,
3333 GFP_KERNEL);
7a3e97b0
SY
3334 if (!hba->utrdl_base_addr ||
3335 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3336 dev_err(hba->dev,
7a3e97b0
SY
3337 "Transfer Descriptor Memory allocation failed\n");
3338 goto out;
3339 }
3340
3341 /*
3342 * Allocate memory for UTP Task Management descriptors
3343 * UFSHCI requires 1024 byte alignment of UTMRD
3344 */
3345 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2953f850
SJ
3346 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3347 utmrdl_size,
3348 &hba->utmrdl_dma_addr,
3349 GFP_KERNEL);
7a3e97b0
SY
3350 if (!hba->utmrdl_base_addr ||
3351 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 3352 dev_err(hba->dev,
7a3e97b0
SY
3353 "Task Management Descriptor Memory allocation failed\n");
3354 goto out;
3355 }
3356
3357 /* Allocate memory for local reference block */
a86854d0
KC
3358 hba->lrb = devm_kcalloc(hba->dev,
3359 hba->nutrs, sizeof(struct ufshcd_lrb),
2953f850 3360 GFP_KERNEL);
7a3e97b0 3361 if (!hba->lrb) {
3b1d0580 3362 dev_err(hba->dev, "LRB Memory allocation failed\n");
7a3e97b0
SY
3363 goto out;
3364 }
3365 return 0;
3366out:
7a3e97b0
SY
3367 return -ENOMEM;
3368}
3369
3370/**
3371 * ufshcd_host_memory_configure - configure local reference block with
3372 * memory offsets
3373 * @hba: per adapter instance
3374 *
3375 * Configure Host memory space
3376 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3377 * address.
3378 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3379 * and PRDT offset.
3380 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3381 * into local reference block.
3382 */
3383static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3384{
3385 struct utp_transfer_cmd_desc *cmd_descp;
3386 struct utp_transfer_req_desc *utrdlp;
3387 dma_addr_t cmd_desc_dma_addr;
3388 dma_addr_t cmd_desc_element_addr;
3389 u16 response_offset;
3390 u16 prdt_offset;
3391 int cmd_desc_size;
3392 int i;
3393
3394 utrdlp = hba->utrdl_base_addr;
3395 cmd_descp = hba->ucdl_base_addr;
3396
3397 response_offset =
3398 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3399 prdt_offset =
3400 offsetof(struct utp_transfer_cmd_desc, prd_table);
3401
3402 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3403 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3404
3405 for (i = 0; i < hba->nutrs; i++) {
3406 /* Configure UTRD with command descriptor base address */
3407 cmd_desc_element_addr =
3408 (cmd_desc_dma_addr + (cmd_desc_size * i));
3409 utrdlp[i].command_desc_base_addr_lo =
3410 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3411 utrdlp[i].command_desc_base_addr_hi =
3412 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3413
3414 /* Response upiu and prdt offset should be in double words */
75b1cc4a
KK
3415 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3416 utrdlp[i].response_upiu_offset =
3417 cpu_to_le16(response_offset);
3418 utrdlp[i].prd_table_offset =
3419 cpu_to_le16(prdt_offset);
3420 utrdlp[i].response_upiu_length =
3421 cpu_to_le16(ALIGNED_UPIU_SIZE);
3422 } else {
3423 utrdlp[i].response_upiu_offset =
7a3e97b0 3424 cpu_to_le16((response_offset >> 2));
75b1cc4a 3425 utrdlp[i].prd_table_offset =
7a3e97b0 3426 cpu_to_le16((prdt_offset >> 2));
75b1cc4a 3427 utrdlp[i].response_upiu_length =
3ca316c5 3428 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
75b1cc4a 3429 }
7a3e97b0
SY
3430
3431 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
ff8e20c6
DR
3432 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3433 (i * sizeof(struct utp_transfer_req_desc));
5a0b0cb9
SRT
3434 hba->lrb[i].ucd_req_ptr =
3435 (struct utp_upiu_req *)(cmd_descp + i);
ff8e20c6 3436 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
7a3e97b0
SY
3437 hba->lrb[i].ucd_rsp_ptr =
3438 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
ff8e20c6
DR
3439 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3440 response_offset;
7a3e97b0
SY
3441 hba->lrb[i].ucd_prdt_ptr =
3442 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
ff8e20c6
DR
3443 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3444 prdt_offset;
7a3e97b0
SY
3445 }
3446}
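
/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * debugging helper showing how the offsets programmed above relate to the
 * command descriptor layout. Without UFSHCD_QUIRK_PRDT_BYTE_GRAN the
 * response UPIU and PRDT offsets are stored in 32-bit (double word) units,
 * hence the ">> 2" in the loop above.
 */
static void __maybe_unused ufshcd_example_dump_ucd_layout(struct ufs_hba *hba)
{
	unsigned int rsp_off = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	unsigned int prdt_off = offsetof(struct utp_transfer_cmd_desc, prd_table);

	dev_dbg(hba->dev, "rsp upiu at %u bytes (%u dwords), prdt at %u bytes (%u dwords)\n",
		rsp_off, rsp_off >> 2, prdt_off, prdt_off >> 2);
}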
3447
3448/**
3449 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3450 * @hba: per adapter instance
3451 *
 3452 * UIC_CMD_DME_LINK_STARTUP command must be issued to the Unipro layer
 3453 * in order to initialize the Unipro link startup procedure.
3454 * Once the Unipro links are up, the device connected to the controller
3455 * is detected.
3456 *
3457 * Returns 0 on success, non-zero value on failure
3458 */
3459static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3460{
6ccf44fe
SJ
3461 struct uic_command uic_cmd = {0};
3462 int ret;
7a3e97b0 3463
6ccf44fe 3464 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
7a3e97b0 3465
6ccf44fe
SJ
3466 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3467 if (ret)
ff8e20c6 3468 dev_dbg(hba->dev,
6ccf44fe
SJ
3469 "dme-link-startup: error code %d\n", ret);
3470 return ret;
7a3e97b0 3471}
4404c5de
AA
3472/**
3473 * ufshcd_dme_reset - UIC command for DME_RESET
3474 * @hba: per adapter instance
3475 *
 3476 * DME_RESET command is issued in order to reset the UniPro stack.
 3477 * This function now deals with cold reset.
3478 *
3479 * Returns 0 on success, non-zero value on failure
3480 */
3481static int ufshcd_dme_reset(struct ufs_hba *hba)
3482{
3483 struct uic_command uic_cmd = {0};
3484 int ret;
3485
3486 uic_cmd.command = UIC_CMD_DME_RESET;
3487
3488 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3489 if (ret)
3490 dev_err(hba->dev,
3491 "dme-reset: error code %d\n", ret);
3492
3493 return ret;
3494}
3495
3496/**
3497 * ufshcd_dme_enable - UIC command for DME_ENABLE
3498 * @hba: per adapter instance
3499 *
3500 * DME_ENABLE command is issued in order to enable UniPro stack.
3501 *
3502 * Returns 0 on success, non-zero value on failure
3503 */
3504static int ufshcd_dme_enable(struct ufs_hba *hba)
3505{
3506 struct uic_command uic_cmd = {0};
3507 int ret;
3508
3509 uic_cmd.command = UIC_CMD_DME_ENABLE;
3510
3511 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3512 if (ret)
3513 dev_err(hba->dev,
3514 "dme-reset: error code %d\n", ret);
3515
3516 return ret;
3517}
7a3e97b0 3518
cad2e03d
YG
3519static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3520{
3521 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3522 unsigned long min_sleep_time_us;
3523
3524 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3525 return;
3526
3527 /*
3528 * last_dme_cmd_tstamp will be 0 only for 1st call to
3529 * this function
3530 */
3531 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3532 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3533 } else {
3534 unsigned long delta =
3535 (unsigned long) ktime_to_us(
3536 ktime_sub(ktime_get(),
3537 hba->last_dme_cmd_tstamp));
3538
3539 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3540 min_sleep_time_us =
3541 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3542 else
3543 return; /* no more delay required */
3544 }
3545
3546 /* allow sleep for extra 50us if needed */
3547 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3548}
3549
12b4fdb4
SJ
3550/**
3551 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3552 * @hba: per adapter instance
3553 * @attr_sel: uic command argument1
3554 * @attr_set: attribute set type as uic command argument2
3555 * @mib_val: setting value as uic command argument3
3556 * @peer: indicate whether peer or local
3557 *
3558 * Returns 0 on success, non-zero value on failure
3559 */
3560int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3561 u8 attr_set, u32 mib_val, u8 peer)
3562{
3563 struct uic_command uic_cmd = {0};
3564 static const char *const action[] = {
3565 "dme-set",
3566 "dme-peer-set"
3567 };
3568 const char *set = action[!!peer];
3569 int ret;
64238fbd 3570 int retries = UFS_UIC_COMMAND_RETRIES;
12b4fdb4
SJ
3571
3572 uic_cmd.command = peer ?
3573 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3574 uic_cmd.argument1 = attr_sel;
3575 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3576 uic_cmd.argument3 = mib_val;
3577
64238fbd
YG
3578 do {
3579 /* for peer attributes we retry upon failure */
3580 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3581 if (ret)
3582 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3583 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3584 } while (ret && peer && --retries);
3585
f37e9f8c 3586 if (ret)
64238fbd 3587 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
f37e9f8c
YG
3588 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3589 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4
SJ
3590
3591 return ret;
3592}
3593EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3594
3595/**
3596 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3597 * @hba: per adapter instance
3598 * @attr_sel: uic command argument1
3599 * @mib_val: the value of the attribute as returned by the UIC command
3600 * @peer: indicate whether peer or local
3601 *
3602 * Returns 0 on success, non-zero value on failure
3603 */
3604int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3605 u32 *mib_val, u8 peer)
3606{
3607 struct uic_command uic_cmd = {0};
3608 static const char *const action[] = {
3609 "dme-get",
3610 "dme-peer-get"
3611 };
3612 const char *get = action[!!peer];
3613 int ret;
64238fbd 3614 int retries = UFS_UIC_COMMAND_RETRIES;
874237f7
YG
3615 struct ufs_pa_layer_attr orig_pwr_info;
3616 struct ufs_pa_layer_attr temp_pwr_info;
3617 bool pwr_mode_change = false;
3618
3619 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3620 orig_pwr_info = hba->pwr_info;
3621 temp_pwr_info = orig_pwr_info;
3622
3623 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3624 orig_pwr_info.pwr_rx == FAST_MODE) {
3625 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3626 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3627 pwr_mode_change = true;
3628 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3629 orig_pwr_info.pwr_rx == SLOW_MODE) {
3630 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3631 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3632 pwr_mode_change = true;
3633 }
3634 if (pwr_mode_change) {
3635 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3636 if (ret)
3637 goto out;
3638 }
3639 }
12b4fdb4
SJ
3640
3641 uic_cmd.command = peer ?
3642 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3643 uic_cmd.argument1 = attr_sel;
3644
64238fbd
YG
3645 do {
3646 /* for peer attributes we retry upon failure */
3647 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3648 if (ret)
3649 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3650 get, UIC_GET_ATTR_ID(attr_sel), ret);
3651 } while (ret && peer && --retries);
3652
f37e9f8c 3653 if (ret)
64238fbd 3654 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
f37e9f8c
YG
3655 get, UIC_GET_ATTR_ID(attr_sel),
3656 UFS_UIC_COMMAND_RETRIES - retries);
12b4fdb4 3657
64238fbd 3658 if (mib_val && !ret)
12b4fdb4 3659 *mib_val = uic_cmd.argument3;
874237f7
YG
3660
3661 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3662 && pwr_mode_change)
3663 ufshcd_change_power_mode(hba, &orig_pwr_info);
12b4fdb4
SJ
3664out:
3665 return ret;
3666}
3667EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3668
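/*
 * Illustrative sketch (not part of the original driver): callers normally go
 * through the ufshcd_dme_set()/ufshcd_dme_get() convenience wrappers declared
 * in ufshcd.h rather than calling ufshcd_dme_set_attr()/ufshcd_dme_get_attr()
 * directly. PA_TACTIVATE is used here purely as an example of a UniPro MIB
 * attribute; the helper itself is hypothetical.
 */
static int __maybe_unused ufshcd_example_bump_tactivate(struct ufs_hba *hba)
{
	u32 tactivate = 0;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &tactivate);
	if (ret)
		return ret;

	/* program the local PA_TACTIVATE one step higher than the current value */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), tactivate + 1);
}
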
53b3d9c3 3669/**
57d104c1
SJ
3670 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
3671 * state) and waits for it to take effect.
3672 *
53b3d9c3 3673 * @hba: per adapter instance
57d104c1
SJ
3674 * @cmd: UIC command to execute
3675 *
3676 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 3677 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
 3678 * and device UniPro link, and hence their final completion would be indicated by
3679 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
3680 * addition to normal UIC command completion Status (UCCS). This function only
3681 * returns after the relevant status bits indicate the completion.
53b3d9c3
SJ
3682 *
3683 * Returns 0 on success, non-zero value on failure
3684 */
57d104c1 3685static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
53b3d9c3 3686{
57d104c1 3687 struct completion uic_async_done;
53b3d9c3
SJ
3688 unsigned long flags;
3689 u8 status;
3690 int ret;
d75f7fe4 3691 bool reenable_intr = false;
53b3d9c3 3692
53b3d9c3 3693 mutex_lock(&hba->uic_cmd_mutex);
57d104c1 3694 init_completion(&uic_async_done);
cad2e03d 3695 ufshcd_add_delay_before_dme_cmd(hba);
53b3d9c3
SJ
3696
3697 spin_lock_irqsave(hba->host->host_lock, flags);
57d104c1 3698 hba->uic_async_done = &uic_async_done;
d75f7fe4
YG
3699 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3700 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3701 /*
3702 * Make sure UIC command completion interrupt is disabled before
3703 * issuing UIC command.
3704 */
3705 wmb();
3706 reenable_intr = true;
57d104c1 3707 }
d75f7fe4
YG
3708 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3709 spin_unlock_irqrestore(hba->host->host_lock, flags);
57d104c1
SJ
3710 if (ret) {
3711 dev_err(hba->dev,
3712 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3713 cmd->command, cmd->argument3, ret);
53b3d9c3
SJ
3714 goto out;
3715 }
3716
57d104c1 3717 if (!wait_for_completion_timeout(hba->uic_async_done,
53b3d9c3
SJ
3718 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3719 dev_err(hba->dev,
57d104c1
SJ
3720 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3721 cmd->command, cmd->argument3);
53b3d9c3
SJ
3722 ret = -ETIMEDOUT;
3723 goto out;
3724 }
3725
3726 status = ufshcd_get_upmcrs(hba);
3727 if (status != PWR_LOCAL) {
3728 dev_err(hba->dev,
479da360 3729 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
57d104c1 3730 cmd->command, status);
53b3d9c3
SJ
3731 ret = (status != PWR_OK) ? status : -1;
3732 }
3733out:
7942f7b5
VG
3734 if (ret) {
3735 ufshcd_print_host_state(hba);
3736 ufshcd_print_pwr_info(hba);
3737 ufshcd_print_host_regs(hba);
3738 }
3739
53b3d9c3 3740 spin_lock_irqsave(hba->host->host_lock, flags);
d75f7fe4 3741 hba->active_uic_cmd = NULL;
57d104c1 3742 hba->uic_async_done = NULL;
d75f7fe4
YG
3743 if (reenable_intr)
3744 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
53b3d9c3
SJ
3745 spin_unlock_irqrestore(hba->host->host_lock, flags);
3746 mutex_unlock(&hba->uic_cmd_mutex);
1ab27c9c 3747
53b3d9c3
SJ
3748 return ret;
3749}
3750
57d104c1
SJ
3751/**
 3752 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3753 * using DME_SET primitives.
3754 * @hba: per adapter instance
 3755 * @mode: power mode value
3756 *
3757 * Returns 0 on success, non-zero value on failure
3758 */
3759static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3760{
3761 struct uic_command uic_cmd = {0};
1ab27c9c 3762 int ret;
57d104c1 3763
c3a2f9ee
YG
3764 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3765 ret = ufshcd_dme_set(hba,
3766 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3767 if (ret) {
3768 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3769 __func__, ret);
3770 goto out;
3771 }
3772 }
3773
57d104c1
SJ
3774 uic_cmd.command = UIC_CMD_DME_SET;
3775 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3776 uic_cmd.argument3 = mode;
1ab27c9c
ST
3777 ufshcd_hold(hba, false);
3778 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3779 ufshcd_release(hba);
57d104c1 3780
c3a2f9ee 3781out:
1ab27c9c 3782 return ret;
57d104c1
SJ
3783}
3784
53c12d0e
YG
3785static int ufshcd_link_recovery(struct ufs_hba *hba)
3786{
3787 int ret;
3788 unsigned long flags;
3789
3790 spin_lock_irqsave(hba->host->host_lock, flags);
3791 hba->ufshcd_state = UFSHCD_STATE_RESET;
3792 ufshcd_set_eh_in_progress(hba);
3793 spin_unlock_irqrestore(hba->host->host_lock, flags);
3794
ebdd1dfd
CG
3795 /* Reset the attached device */
3796 ufshcd_vops_device_reset(hba);
3797
53c12d0e
YG
3798 ret = ufshcd_host_reset_and_restore(hba);
3799
3800 spin_lock_irqsave(hba->host->host_lock, flags);
3801 if (ret)
3802 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3803 ufshcd_clear_eh_in_progress(hba);
3804 spin_unlock_irqrestore(hba->host->host_lock, flags);
3805
3806 if (ret)
3807 dev_err(hba->dev, "%s: link recovery failed, err %d",
3808 __func__, ret);
3809
3810 return ret;
3811}
3812
87d0b4a6 3813static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
57d104c1 3814{
87d0b4a6 3815 int ret;
57d104c1 3816 struct uic_command uic_cmd = {0};
911a0771 3817 ktime_t start = ktime_get();
57d104c1 3818
ee32c909
KK
3819 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3820
57d104c1 3821 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
87d0b4a6 3822 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771
SJ
3823 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3824 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
87d0b4a6 3825
53c12d0e 3826 if (ret) {
6d303e4b
SJ
3827 int err;
3828
87d0b4a6
YG
3829 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3830 __func__, ret);
3831
53c12d0e 3832 /*
6d303e4b
SJ
3833 * If link recovery fails then return error code returned from
3834 * ufshcd_link_recovery().
3835 * If link recovery succeeds then return -EAGAIN to attempt
3836 * hibern8 enter retry again.
53c12d0e 3837 */
6d303e4b
SJ
3838 err = ufshcd_link_recovery(hba);
3839 if (err) {
3840 dev_err(hba->dev, "%s: link recovery failed", __func__);
3841 ret = err;
3842 } else {
3843 ret = -EAGAIN;
3844 }
ee32c909
KK
3845 } else
3846 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3847 POST_CHANGE);
53c12d0e 3848
87d0b4a6
YG
3849 return ret;
3850}
3851
3852static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3853{
3854 int ret = 0, retries;
57d104c1 3855
87d0b4a6
YG
3856 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3857 ret = __ufshcd_uic_hibern8_enter(hba);
6d303e4b 3858 if (!ret)
87d0b4a6
YG
3859 goto out;
3860 }
3861out:
3862 return ret;
57d104c1
SJ
3863}
3864
3865static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3866{
3867 struct uic_command uic_cmd = {0};
3868 int ret;
911a0771 3869 ktime_t start = ktime_get();
57d104c1 3870
ee32c909
KK
3871 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3872
57d104c1
SJ
3873 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3874 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
911a0771
SJ
3875 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3876 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3877
57d104c1 3878 if (ret) {
53c12d0e
YG
3879 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3880 __func__, ret);
3881 ret = ufshcd_link_recovery(hba);
ff8e20c6 3882 } else {
ee32c909
KK
3883 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3884 POST_CHANGE);
ff8e20c6
DR
3885 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3886 hba->ufs_stats.hibern8_exit_cnt++;
3887 }
57d104c1
SJ
3888
3889 return ret;
3890}
3891
71d848b8 3892void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
ad448378
AH
3893{
3894 unsigned long flags;
3895
ee5f1042 3896 if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
ad448378
AH
3897 return;
3898
3899 spin_lock_irqsave(hba->host->host_lock, flags);
3900 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3901 spin_unlock_irqrestore(hba->host->host_lock, flags);
3902}
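
/*
 * Illustrative sketch (not part of the original driver): hba->ahit holds the
 * raw Auto-Hibernate Idle Timer register value written above. Assuming the
 * UFSHCI_AHIBERN8_TIMER_MASK / UFSHCI_AHIBERN8_SCALE_MASK field macros from
 * ufshci.h, a hypothetical vendor hook could request roughly a 150 ms idle
 * timeout like this before ufshcd_auto_hibern8_enable() is called.
 */
static void __maybe_unused ufshcd_example_set_ahit(struct ufs_hba *hba)
{
	/* timer value 150; a scale field of 3 is assumed to mean 1 ms units */
	hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}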
3903
5064636c
YG
3904 /**
3905 * ufshcd_init_pwr_info - setting the POR (power on reset)
3906 * values in hba power info
3907 * @hba: per-adapter instance
3908 */
3909static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3910{
3911 hba->pwr_info.gear_rx = UFS_PWM_G1;
3912 hba->pwr_info.gear_tx = UFS_PWM_G1;
3913 hba->pwr_info.lane_rx = 1;
3914 hba->pwr_info.lane_tx = 1;
3915 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3916 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3917 hba->pwr_info.hs_rate = 0;
3918}
3919
d3e89bac 3920/**
7eb584db
DR
3921 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3922 * @hba: per-adapter instance
d3e89bac 3923 */
7eb584db 3924static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
d3e89bac 3925{
7eb584db
DR
3926 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3927
3928 if (hba->max_pwr_info.is_valid)
3929 return 0;
3930
2349b533
SJ
3931 pwr_info->pwr_tx = FAST_MODE;
3932 pwr_info->pwr_rx = FAST_MODE;
7eb584db 3933 pwr_info->hs_rate = PA_HS_MODE_B;
d3e89bac
SJ
3934
3935 /* Get the connected lane count */
7eb584db
DR
3936 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3937 &pwr_info->lane_rx);
3938 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3939 &pwr_info->lane_tx);
3940
3941 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3942 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3943 __func__,
3944 pwr_info->lane_rx,
3945 pwr_info->lane_tx);
3946 return -EINVAL;
3947 }
d3e89bac
SJ
3948
3949 /*
3950 * First, get the maximum gears of HS speed.
3951 * If a zero value, it means there is no HSGEAR capability.
3952 * Then, get the maximum gears of PWM speed.
3953 */
7eb584db
DR
3954 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3955 if (!pwr_info->gear_rx) {
3956 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3957 &pwr_info->gear_rx);
3958 if (!pwr_info->gear_rx) {
3959 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3960 __func__, pwr_info->gear_rx);
3961 return -EINVAL;
3962 }
2349b533 3963 pwr_info->pwr_rx = SLOW_MODE;
d3e89bac
SJ
3964 }
3965
7eb584db
DR
3966 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3967 &pwr_info->gear_tx);
3968 if (!pwr_info->gear_tx) {
d3e89bac 3969 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
7eb584db
DR
3970 &pwr_info->gear_tx);
3971 if (!pwr_info->gear_tx) {
3972 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3973 __func__, pwr_info->gear_tx);
3974 return -EINVAL;
3975 }
2349b533 3976 pwr_info->pwr_tx = SLOW_MODE;
7eb584db
DR
3977 }
3978
3979 hba->max_pwr_info.is_valid = true;
3980 return 0;
3981}
3982
3983static int ufshcd_change_power_mode(struct ufs_hba *hba,
3984 struct ufs_pa_layer_attr *pwr_mode)
3985{
3986 int ret;
3987
3988 /* if already configured to the requested pwr_mode */
3989 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3990 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3991 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3992 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3993 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3994 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3995 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3996 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3997 return 0;
d3e89bac
SJ
3998 }
3999
4000 /*
4001 * Configure attributes for power mode change with below.
4002 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4003 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4004 * - PA_HSSERIES
4005 */
7eb584db
DR
4006 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4007 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4008 pwr_mode->lane_rx);
4009 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4010 pwr_mode->pwr_rx == FAST_MODE)
d3e89bac 4011 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
7eb584db
DR
4012 else
4013 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
d3e89bac 4014
7eb584db
DR
4015 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4016 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4017 pwr_mode->lane_tx);
4018 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4019 pwr_mode->pwr_tx == FAST_MODE)
d3e89bac 4020 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
7eb584db
DR
4021 else
4022 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
d3e89bac 4023
7eb584db
DR
4024 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4025 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4026 pwr_mode->pwr_rx == FAST_MODE ||
4027 pwr_mode->pwr_tx == FAST_MODE)
4028 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4029 pwr_mode->hs_rate);
d3e89bac 4030
08342537
CG
4031 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4032 DL_FC0ProtectionTimeOutVal_Default);
4033 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4034 DL_TC0ReplayTimeOutVal_Default);
4035 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4036 DL_AFC0ReqTimeOutVal_Default);
4037 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4038 DL_FC1ProtectionTimeOutVal_Default);
4039 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4040 DL_TC1ReplayTimeOutVal_Default);
4041 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4042 DL_AFC1ReqTimeOutVal_Default);
4043
4044 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4045 DL_FC0ProtectionTimeOutVal_Default);
4046 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4047 DL_TC0ReplayTimeOutVal_Default);
4048 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4049 DL_AFC0ReqTimeOutVal_Default);
4050
7eb584db
DR
4051 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4052 | pwr_mode->pwr_tx);
4053
4054 if (ret) {
d3e89bac 4055 dev_err(hba->dev,
7eb584db
DR
4056 "%s: power mode change failed %d\n", __func__, ret);
4057 } else {
0263bcd0
YG
4058 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4059 pwr_mode);
7eb584db
DR
4060
4061 memcpy(&hba->pwr_info, pwr_mode,
4062 sizeof(struct ufs_pa_layer_attr));
4063 }
4064
4065 return ret;
4066}
4067
4068/**
4069 * ufshcd_config_pwr_mode - configure a new power mode
4070 * @hba: per-adapter instance
4071 * @desired_pwr_mode: desired power configuration
4072 */
0d846e70 4073int ufshcd_config_pwr_mode(struct ufs_hba *hba,
7eb584db
DR
4074 struct ufs_pa_layer_attr *desired_pwr_mode)
4075{
4076 struct ufs_pa_layer_attr final_params = { 0 };
4077 int ret;
4078
0263bcd0
YG
4079 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4080 desired_pwr_mode, &final_params);
4081
4082 if (ret)
7eb584db
DR
4083 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4084
4085 ret = ufshcd_change_power_mode(hba, &final_params);
a3cd5ec5
SJ
4086 if (!ret)
4087 ufshcd_print_pwr_info(hba);
d3e89bac
SJ
4088
4089 return ret;
4090}
0d846e70 4091EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
d3e89bac 4092
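/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * caller asking for 2-lane HS-G3 Rate B. Real callers usually start from
 * hba->max_pwr_info.info, which ufshcd_get_max_pwr_mode() fills with the
 * highest capability negotiated on the link, instead of hard-coding gears
 * and lane counts.
 */
static int __maybe_unused ufshcd_example_request_hs_g3(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr pwr = {
		.gear_rx = UFS_HS_G3,
		.gear_tx = UFS_HS_G3,
		.lane_rx = 2,
		.lane_tx = 2,
		.pwr_rx = FAST_MODE,
		.pwr_tx = FAST_MODE,
		.hs_rate = PA_HS_MODE_B,
	};

	return ufshcd_config_pwr_mode(hba, &pwr);
}
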
68078d5c
DR
4093/**
4094 * ufshcd_complete_dev_init() - checks device readiness
8aa29f19 4095 * @hba: per-adapter instance
68078d5c
DR
4096 *
4097 * Set fDeviceInit flag and poll until device toggles it.
4098 */
4099static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4100{
dc3c8d3a
YG
4101 int i;
4102 int err;
68078d5c
DR
4103 bool flag_res = 1;
4104
dc3c8d3a
YG
4105 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4106 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
68078d5c
DR
4107 if (err) {
4108 dev_err(hba->dev,
4109 "%s setting fDeviceInit flag failed with error %d\n",
4110 __func__, err);
4111 goto out;
4112 }
4113
dc3c8d3a
YG
4114 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4115 for (i = 0; i < 1000 && !err && flag_res; i++)
4116 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4117 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4118
68078d5c
DR
4119 if (err)
4120 dev_err(hba->dev,
4121 "%s reading fDeviceInit flag failed with error %d\n",
4122 __func__, err);
4123 else if (flag_res)
4124 dev_err(hba->dev,
4125 "%s fDeviceInit was not cleared by the device\n",
4126 __func__);
4127
4128out:
4129 return err;
4130}
4131
7a3e97b0
SY
4132/**
4133 * ufshcd_make_hba_operational - Make UFS controller operational
4134 * @hba: per adapter instance
4135 *
4136 * To bring UFS host controller to operational state,
5c0c28a8
SRT
4137 * 1. Enable required interrupts
4138 * 2. Configure interrupt aggregation
897efe62 4139 * 3. Program UTRL and UTMRL base address
5c0c28a8 4140 * 4. Configure run-stop-registers
7a3e97b0
SY
4141 *
4142 * Returns 0 on success, non-zero value on failure
4143 */
4144static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4145{
4146 int err = 0;
4147 u32 reg;
4148
6ccf44fe
SJ
4149 /* Enable required interrupts */
4150 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4151
4152 /* Configure interrupt aggregation */
b852190e
YG
4153 if (ufshcd_is_intr_aggr_allowed(hba))
4154 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4155 else
4156 ufshcd_disable_intr_aggr(hba);
6ccf44fe
SJ
4157
4158 /* Configure UTRL and UTMRL base address registers */
4159 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4160 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4161 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4162 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4163 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4164 REG_UTP_TASK_REQ_LIST_BASE_L);
4165 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4166 REG_UTP_TASK_REQ_LIST_BASE_H);
4167
897efe62
YG
4168 /*
4169 * Make sure base address and interrupt setup are updated before
4170 * enabling the run/stop registers below.
4171 */
4172 wmb();
4173
7a3e97b0
SY
4174 /*
4175 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
7a3e97b0 4176 */
5c0c28a8 4177 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
7a3e97b0
SY
4178 if (!(ufshcd_get_lists_status(reg))) {
4179 ufshcd_enable_run_stop_reg(hba);
4180 } else {
3b1d0580 4181 dev_err(hba->dev,
7a3e97b0
SY
4182 "Host controller not ready to process requests");
4183 err = -EIO;
4184 goto out;
4185 }
4186
7a3e97b0
SY
4187out:
4188 return err;
4189}
4190
596585a2
YG
4191/**
4192 * ufshcd_hba_stop - Send controller to reset state
4193 * @hba: per adapter instance
4194 * @can_sleep: perform sleep or just spin
4195 */
4196static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4197{
4198 int err;
4199
4200 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4201 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4202 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4203 10, 1, can_sleep);
4204 if (err)
4205 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4206}
4207
7a3e97b0 4208/**
4404c5de 4209 * ufshcd_hba_execute_hce - initialize the controller
7a3e97b0
SY
4210 * @hba: per adapter instance
4211 *
 4212 * The controller resets itself and the controller firmware initialization
 4213 * sequence kicks off. When the controller is ready it will set
4214 * the Host Controller Enable bit to 1.
4215 *
4216 * Returns 0 on success, non-zero value on failure
4217 */
4404c5de 4218static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
7a3e97b0
SY
4219{
4220 int retry;
4221
596585a2 4222 if (!ufshcd_is_hba_active(hba))
7a3e97b0 4223 /* change controller state to "reset state" */
596585a2 4224 ufshcd_hba_stop(hba, true);
7a3e97b0 4225
57d104c1
SJ
4226 /* UniPro link is disabled at this point */
4227 ufshcd_set_link_off(hba);
4228
0263bcd0 4229 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
5c0c28a8 4230
7a3e97b0
SY
4231 /* start controller initialization sequence */
4232 ufshcd_hba_start(hba);
4233
4234 /*
 4235	 * To initialize a UFS host controller the HCE bit must be set to 1.
 4236	 * During initialization the HCE bit value changes from 1->0->1.
 4237	 * When the host controller completes the initialization sequence
 4238	 * it sets the HCE bit back to 1. The same HCE bit is read back
 4239	 * to check if the controller has completed the initialization sequence,
 4240	 * so without this delay the value HCE = 1, set by the previous
 4241	 * instruction, might be read back prematurely.
4242 * This delay can be changed based on the controller.
4243 */
838c1efc 4244 usleep_range(1000, 1100);
7a3e97b0
SY
4245
4246 /* wait for the host controller to complete initialization */
4247 retry = 10;
4248 while (ufshcd_is_hba_active(hba)) {
4249 if (retry) {
4250 retry--;
4251 } else {
3b1d0580 4252 dev_err(hba->dev,
7a3e97b0
SY
4253 "Controller enable failed\n");
4254 return -EIO;
4255 }
838c1efc 4256 usleep_range(5000, 5100);
7a3e97b0 4257 }
5c0c28a8 4258
1d337ec2 4259 /* enable UIC related interrupts */
57d104c1 4260 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
1d337ec2 4261
0263bcd0 4262 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
5c0c28a8 4263
7a3e97b0
SY
4264 return 0;
4265}
4266
4404c5de
AA
4267static int ufshcd_hba_enable(struct ufs_hba *hba)
4268{
4269 int ret;
4270
4271 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4272 ufshcd_set_link_off(hba);
4273 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4274
4275 /* enable UIC related interrupts */
4276 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4277 ret = ufshcd_dme_reset(hba);
4278 if (!ret) {
4279 ret = ufshcd_dme_enable(hba);
4280 if (!ret)
4281 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4282 if (ret)
4283 dev_err(hba->dev,
4284 "Host controller enable failed with non-hce\n");
4285 }
4286 } else {
4287 ret = ufshcd_hba_execute_hce(hba);
4288 }
4289
4290 return ret;
4291}
7ca38cf3
YG
4292static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4293{
4294 int tx_lanes, i, err = 0;
4295
4296 if (!peer)
4297 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4298 &tx_lanes);
4299 else
4300 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4301 &tx_lanes);
4302 for (i = 0; i < tx_lanes; i++) {
4303 if (!peer)
4304 err = ufshcd_dme_set(hba,
4305 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4306 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4307 0);
4308 else
4309 err = ufshcd_dme_peer_set(hba,
4310 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4311 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4312 0);
4313 if (err) {
4314 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4315 __func__, peer, i, err);
4316 break;
4317 }
4318 }
4319
4320 return err;
4321}
4322
4323static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4324{
4325 return ufshcd_disable_tx_lcc(hba, true);
4326}
4327
8808b4e9
SC
4328static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4329 u32 reg)
4330{
4331 reg_hist->reg[reg_hist->pos] = reg;
4332 reg_hist->tstamp[reg_hist->pos] = ktime_get();
4333 reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4334}
4335
7a3e97b0 4336/**
6ccf44fe 4337 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
4338 * @hba: per adapter instance
4339 *
6ccf44fe 4340 * Returns 0 for success, non-zero in case of failure
7a3e97b0 4341 */
6ccf44fe 4342static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 4343{
6ccf44fe 4344 int ret;
1d337ec2 4345 int retries = DME_LINKSTARTUP_RETRIES;
7caf489b 4346 bool link_startup_again = false;
7a3e97b0 4347
7caf489b
SJ
4348 /*
4349 * If UFS device isn't active then we will have to issue link startup
4350 * 2 times to make sure the device state move to active.
4351 */
4352 if (!ufshcd_is_ufs_dev_active(hba))
4353 link_startup_again = true;
7a3e97b0 4354
7caf489b 4355link_startup:
1d337ec2 4356 do {
0263bcd0 4357 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
6ccf44fe 4358
1d337ec2 4359 ret = ufshcd_dme_link_startup(hba);
5c0c28a8 4360
1d337ec2
SRT
4361 /* check if device is detected by inter-connect layer */
4362 if (!ret && !ufshcd_is_device_present(hba)) {
8808b4e9
SC
4363 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4364 0);
1d337ec2
SRT
4365 dev_err(hba->dev, "%s: Device not present\n", __func__);
4366 ret = -ENXIO;
4367 goto out;
4368 }
6ccf44fe 4369
1d337ec2
SRT
4370 /*
4371 * DME link lost indication is only received when link is up,
4372 * but we can't be sure if the link is up until link startup
4373 * succeeds. So reset the local Uni-Pro and try again.
4374 */
8808b4e9
SC
4375 if (ret && ufshcd_hba_enable(hba)) {
4376 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4377 (u32)ret);
1d337ec2 4378 goto out;
8808b4e9 4379 }
1d337ec2
SRT
4380 } while (ret && retries--);
4381
8808b4e9 4382 if (ret) {
1d337ec2 4383 /* failed to get the link up... retire */
8808b4e9
SC
4384 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4385 (u32)ret);
5c0c28a8 4386 goto out;
8808b4e9 4387 }
5c0c28a8 4388
7caf489b
SJ
4389 if (link_startup_again) {
4390 link_startup_again = false;
4391 retries = DME_LINKSTARTUP_RETRIES;
4392 goto link_startup;
4393 }
4394
d2aebb9b
SJ
4395 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4396 ufshcd_init_pwr_info(hba);
4397 ufshcd_print_pwr_info(hba);
4398
7ca38cf3
YG
4399 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4400 ret = ufshcd_disable_device_tx_lcc(hba);
4401 if (ret)
4402 goto out;
4403 }
4404
5c0c28a8 4405 /* Include any host controller configuration via UIC commands */
0263bcd0
YG
4406 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4407 if (ret)
4408 goto out;
7a3e97b0 4409
5c0c28a8 4410 ret = ufshcd_make_hba_operational(hba);
6ccf44fe 4411out:
7942f7b5 4412 if (ret) {
6ccf44fe 4413 dev_err(hba->dev, "link startup failed %d\n", ret);
7942f7b5
VG
4414 ufshcd_print_host_state(hba);
4415 ufshcd_print_pwr_info(hba);
4416 ufshcd_print_host_regs(hba);
4417 }
6ccf44fe 4418 return ret;
7a3e97b0
SY
4419}
4420
5a0b0cb9
SRT
4421/**
4422 * ufshcd_verify_dev_init() - Verify device initialization
4423 * @hba: per-adapter instance
4424 *
4425 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4426 * device Transport Protocol (UTP) layer is ready after a reset.
4427 * If the UTP layer at the device side is not initialized, it may
4428 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4429 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4430 */
4431static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4432{
4433 int err = 0;
4434 int retries;
4435
1ab27c9c 4436 ufshcd_hold(hba, false);
5a0b0cb9
SRT
4437 mutex_lock(&hba->dev_cmd.lock);
4438 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4439 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4440 NOP_OUT_TIMEOUT);
4441
4442 if (!err || err == -ETIMEDOUT)
4443 break;
4444
4445 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4446 }
4447 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 4448 ufshcd_release(hba);
5a0b0cb9
SRT
4449
4450 if (err)
4451 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4452 return err;
4453}
4454
0ce147d4
SJ
4455/**
4456 * ufshcd_set_queue_depth - set lun queue depth
4457 * @sdev: pointer to SCSI device
4458 *
4459 * Read bLUQueueDepth value and activate scsi tagged command
4460 * queueing. For WLUN, queue depth is set to 1. For best-effort
 4461 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
 4462 * value that the host can queue.
4463 */
4464static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4465{
4466 int ret = 0;
4467 u8 lun_qdepth;
4468 struct ufs_hba *hba;
4469
4470 hba = shost_priv(sdev->host);
4471
4472 lun_qdepth = hba->nutrs;
dbd34a61
SM
4473 ret = ufshcd_read_unit_desc_param(hba,
4474 ufshcd_scsi_to_upiu_lun(sdev->lun),
4475 UNIT_DESC_PARAM_LU_Q_DEPTH,
4476 &lun_qdepth,
4477 sizeof(lun_qdepth));
0ce147d4
SJ
4478
4479 /* Some WLUN doesn't support unit descriptor */
4480 if (ret == -EOPNOTSUPP)
4481 lun_qdepth = 1;
4482 else if (!lun_qdepth)
4483 /* eventually, we can figure out the real queue depth */
4484 lun_qdepth = hba->nutrs;
4485 else
4486 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4487
4488 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4489 __func__, lun_qdepth);
db5ed4df 4490 scsi_change_queue_depth(sdev, lun_qdepth);
0ce147d4
SJ
4491}
4492
57d104c1
SJ
4493/*
4494 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4495 * @hba: per-adapter instance
4496 * @lun: UFS device lun id
4497 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4498 *
 4499 * Returns 0 in case of success and the b_lu_write_protect status would be returned
 4500 * in the @b_lu_write_protect parameter.
4501 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4502 * Returns -EINVAL in case of invalid parameters passed to this function.
4503 */
4504static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4505 u8 lun,
4506 u8 *b_lu_write_protect)
4507{
4508 int ret;
4509
4510 if (!b_lu_write_protect)
4511 ret = -EINVAL;
4512 /*
4513 * According to UFS device spec, RPMB LU can't be write
4514 * protected so skip reading bLUWriteProtect parameter for
4515 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4516 */
4517 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4518 ret = -ENOTSUPP;
4519 else
4520 ret = ufshcd_read_unit_desc_param(hba,
4521 lun,
4522 UNIT_DESC_PARAM_LU_WR_PROTECT,
4523 b_lu_write_protect,
4524 sizeof(*b_lu_write_protect));
4525 return ret;
4526}
4527
4528/**
4529 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4530 * status
4531 * @hba: per-adapter instance
4532 * @sdev: pointer to SCSI device
4533 *
4534 */
4535static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4536 struct scsi_device *sdev)
4537{
4538 if (hba->dev_info.f_power_on_wp_en &&
4539 !hba->dev_info.is_lu_power_on_wp) {
4540 u8 b_lu_write_protect;
4541
4542 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4543 &b_lu_write_protect) &&
4544 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4545 hba->dev_info.is_lu_power_on_wp = true;
4546 }
4547}
4548
7a3e97b0
SY
4549/**
4550 * ufshcd_slave_alloc - handle initial SCSI device configurations
4551 * @sdev: pointer to SCSI device
4552 *
4553 * Returns success
4554 */
4555static int ufshcd_slave_alloc(struct scsi_device *sdev)
4556{
4557 struct ufs_hba *hba;
4558
4559 hba = shost_priv(sdev->host);
7a3e97b0
SY
4560
4561 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4562 sdev->use_10_for_ms = 1;
a3a76391
CG
4563
4564 /* DBD field should be set to 1 in mode sense(10) */
4565 sdev->set_dbd_for_ms = 1;
7a3e97b0 4566
e8e7f271
SRT
4567 /* allow SCSI layer to restart the device in case of errors */
4568 sdev->allow_restart = 1;
4264fd61 4569
b2a6c522
SRT
4570 /* REPORT SUPPORTED OPERATION CODES is not supported */
4571 sdev->no_report_opcodes = 1;
4572
84af7e8b
SRT
4573 /* WRITE_SAME command is not supported */
4574 sdev->no_write_same = 1;
e8e7f271 4575
0ce147d4 4576 ufshcd_set_queue_depth(sdev);
4264fd61 4577
57d104c1
SJ
4578 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4579
7a3e97b0
SY
4580 return 0;
4581}
4582
4264fd61
SRT
4583/**
4584 * ufshcd_change_queue_depth - change queue depth
4585 * @sdev: pointer to SCSI device
4586 * @depth: required depth to set
4264fd61 4587 *
db5ed4df 4588 * Change queue depth and make sure the max. limits are not crossed.
4264fd61 4589 */
db5ed4df 4590static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4264fd61
SRT
4591{
4592 struct ufs_hba *hba = shost_priv(sdev->host);
4593
4594 if (depth > hba->nutrs)
4595 depth = hba->nutrs;
db5ed4df 4596 return scsi_change_queue_depth(sdev, depth);
4264fd61
SRT
4597}
4598
eeda4749
AM
4599/**
4600 * ufshcd_slave_configure - adjust SCSI device configurations
4601 * @sdev: pointer to SCSI device
4602 */
4603static int ufshcd_slave_configure(struct scsi_device *sdev)
4604{
49615ba1 4605 struct ufs_hba *hba = shost_priv(sdev->host);
eeda4749
AM
4606 struct request_queue *q = sdev->request_queue;
4607
4608 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
49615ba1
SC
4609
4610 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4611 sdev->rpm_autosuspend = 1;
4612
eeda4749
AM
4613 return 0;
4614}
4615
7a3e97b0
SY
4616/**
4617 * ufshcd_slave_destroy - remove SCSI device configurations
4618 * @sdev: pointer to SCSI device
4619 */
4620static void ufshcd_slave_destroy(struct scsi_device *sdev)
4621{
4622 struct ufs_hba *hba;
4623
4624 hba = shost_priv(sdev->host);
0ce147d4 4625 /* Drop the reference as it won't be needed anymore */
7c48bfd0
AM
4626 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4627 unsigned long flags;
4628
4629 spin_lock_irqsave(hba->host->host_lock, flags);
0ce147d4 4630 hba->sdev_ufs_device = NULL;
7c48bfd0
AM
4631 spin_unlock_irqrestore(hba->host->host_lock, flags);
4632 }
7a3e97b0
SY
4633}
4634
7a3e97b0
SY
4635/**
4636 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
8aa29f19 4637 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
4638 * @scsi_status: SCSI command status
4639 *
4640 * Returns value base on SCSI command status
4641 */
4642static inline int
4643ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4644{
4645 int result = 0;
4646
4647 switch (scsi_status) {
7a3e97b0 4648 case SAM_STAT_CHECK_CONDITION:
1c2623c5 4649 ufshcd_copy_sense_data(lrbp);
30eb2e4c 4650 /* fallthrough */
1c2623c5 4651 case SAM_STAT_GOOD:
7a3e97b0
SY
4652 result |= DID_OK << 16 |
4653 COMMAND_COMPLETE << 8 |
1c2623c5 4654 scsi_status;
7a3e97b0
SY
4655 break;
4656 case SAM_STAT_TASK_SET_FULL:
1c2623c5 4657 case SAM_STAT_BUSY:
7a3e97b0 4658 case SAM_STAT_TASK_ABORTED:
1c2623c5
SJ
4659 ufshcd_copy_sense_data(lrbp);
4660 result |= scsi_status;
7a3e97b0
SY
4661 break;
4662 default:
4663 result |= DID_ERROR << 16;
4664 break;
4665 } /* end of switch */
4666
4667 return result;
4668}
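
/*
 * Illustrative sketch (not part of the original driver): the result word
 * handed back to the SCSI midlayer packs the host byte, message byte and
 * SCSI status, e.g. DID_ERROR alone yields 0x00070000 while a CHECK
 * CONDITION completed with DID_OK ends up as 0x00000002. The helper below
 * is hypothetical and only restates that packing.
 */
static inline int __maybe_unused
ufshcd_example_pack_result(int host_byte, int msg_byte, int scsi_status)
{
	return (host_byte << 16) | (msg_byte << 8) | scsi_status;
}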
4669
4670/**
4671 * ufshcd_transfer_rsp_status - Get overall status of the response
4672 * @hba: per adapter instance
8aa29f19 4673 * @lrbp: pointer to local reference block of completed command
7a3e97b0
SY
4674 *
4675 * Returns result of the command to notify SCSI midlayer
4676 */
4677static inline int
4678ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4679{
4680 int result = 0;
4681 int scsi_status;
4682 int ocs;
4683
4684 /* overall command status of utrd */
4685 ocs = ufshcd_get_tr_ocs(lrbp);
4686
4687 switch (ocs) {
4688 case OCS_SUCCESS:
5a0b0cb9 4689 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
ff8e20c6 4690 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5a0b0cb9
SRT
4691 switch (result) {
4692 case UPIU_TRANSACTION_RESPONSE:
4693 /*
4694 * get the response UPIU result to extract
4695 * the SCSI command status
4696 */
4697 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4698
4699 /*
4700 * get the result based on SCSI status response
4701 * to notify the SCSI midlayer of the command status
4702 */
4703 scsi_status = result & MASK_SCSI_STATUS;
4704 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
66ec6d59 4705
f05ac2e5
YG
4706 /*
4707 * Currently we are only supporting BKOPs exception
4708 * events hence we can ignore BKOPs exception event
4709 * during power management callbacks. BKOPs exception
4710 * event is not expected to be raised in runtime suspend
4711 * callback as it allows the urgent bkops.
4712 * During system suspend, we are anyway forcefully
4713 * disabling the bkops and if urgent bkops is needed
4714 * it will be enabled on system resume. Long term
4715 * solution could be to abort the system suspend if
4716 * UFS device needs urgent BKOPs.
4717 */
4718 if (!hba->pm_op_in_progress &&
4719 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
66ec6d59 4720 schedule_work(&hba->eeh_work);
5a0b0cb9
SRT
4721 break;
4722 case UPIU_TRANSACTION_REJECT_UPIU:
4723 /* TODO: handle Reject UPIU Response */
4724 result = DID_ERROR << 16;
3b1d0580 4725 dev_err(hba->dev,
5a0b0cb9
SRT
4726 "Reject UPIU not fully implemented\n");
4727 break;
4728 default:
5a0b0cb9
SRT
4729 dev_err(hba->dev,
4730 "Unexpected request response code = %x\n",
4731 result);
e0347d89 4732 result = DID_ERROR << 16;
7a3e97b0
SY
4733 break;
4734 }
7a3e97b0
SY
4735 break;
4736 case OCS_ABORTED:
4737 result |= DID_ABORT << 16;
4738 break;
e8e7f271
SRT
4739 case OCS_INVALID_COMMAND_STATUS:
4740 result |= DID_REQUEUE << 16;
4741 break;
7a3e97b0
SY
4742 case OCS_INVALID_CMD_TABLE_ATTR:
4743 case OCS_INVALID_PRDT_ATTR:
4744 case OCS_MISMATCH_DATA_BUF_SIZE:
4745 case OCS_MISMATCH_RESP_UPIU_SIZE:
4746 case OCS_PEER_COMM_FAILURE:
4747 case OCS_FATAL_ERROR:
4748 default:
4749 result |= DID_ERROR << 16;
3b1d0580 4750 dev_err(hba->dev,
ff8e20c6
DR
4751 "OCS error from controller = %x for tag %d\n",
4752 ocs, lrbp->task_tag);
4753 ufshcd_print_host_regs(hba);
6ba65588 4754 ufshcd_print_host_state(hba);
7a3e97b0
SY
4755 break;
4756 } /* end of switch */
4757
2df74b69 4758 if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
66cc820f 4759 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
7a3e97b0
SY
4760 return result;
4761}
4762
6ccf44fe
SJ
4763/**
4764 * ufshcd_uic_cmd_compl - handle completion of uic command
4765 * @hba: per adapter instance
53b3d9c3 4766 * @intr_status: interrupt status generated by the controller
9333d775
VG
4767 *
4768 * Returns
4769 * IRQ_HANDLED - If interrupt is valid
4770 * IRQ_NONE - If invalid interrupt
6ccf44fe 4771 */
9333d775 4772static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 4773{
9333d775
VG
4774 irqreturn_t retval = IRQ_NONE;
4775
53b3d9c3 4776 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
4777 hba->active_uic_cmd->argument2 |=
4778 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
4779 hba->active_uic_cmd->argument3 =
4780 ufshcd_get_dme_attr_val(hba);
6ccf44fe 4781 complete(&hba->active_uic_cmd->done);
9333d775 4782 retval = IRQ_HANDLED;
6ccf44fe 4783 }
53b3d9c3 4784
9333d775 4785 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
57d104c1 4786 complete(hba->uic_async_done);
9333d775
VG
4787 retval = IRQ_HANDLED;
4788 }
4789 return retval;
6ccf44fe
SJ
4790}
4791
7a3e97b0 4792/**
9a47ec7c 4793 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
7a3e97b0 4794 * @hba: per adapter instance
9a47ec7c 4795 * @completed_reqs: requests to complete
7a3e97b0 4796 */
9a47ec7c
YG
4797static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4798 unsigned long completed_reqs)
7a3e97b0 4799{
5a0b0cb9
SRT
4800 struct ufshcd_lrb *lrbp;
4801 struct scsi_cmnd *cmd;
7a3e97b0
SY
4802 int result;
4803 int index;
e9d501b1 4804
e9d501b1
DR
4805 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4806 lrbp = &hba->lrb[index];
4807 cmd = lrbp->cmd;
4808 if (cmd) {
1a07f2d9 4809 ufshcd_add_command_trace(hba, index, "complete");
e9d501b1
DR
4810 result = ufshcd_transfer_rsp_status(hba, lrbp);
4811 scsi_dma_unmap(cmd);
4812 cmd->result = result;
4813 /* Mark completed command as NULL in LRB */
4814 lrbp->cmd = NULL;
74a527a2 4815 lrbp->compl_time_stamp = ktime_get();
e9d501b1
DR
4816 /* Do not touch lrbp after scsi done */
4817 cmd->scsi_done(cmd);
1ab27c9c 4818 __ufshcd_release(hba);
300bb13f
JP
4819 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4820 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
74a527a2 4821 lrbp->compl_time_stamp = ktime_get();
1a07f2d9
LS
4822 if (hba->dev_cmd.complete) {
4823 ufshcd_add_command_trace(hba, index,
4824 "dev_complete");
e9d501b1 4825 complete(hba->dev_cmd.complete);
1a07f2d9 4826 }
e9d501b1 4827 }
401f1e44
SJ
4828 if (ufshcd_is_clkscaling_supported(hba))
4829 hba->clk_scaling.active_reqs--;
e9d501b1 4830 }
7a3e97b0
SY
4831
4832 /* clear corresponding bits of completed commands */
4833 hba->outstanding_reqs ^= completed_reqs;
4834
856b3483 4835 ufshcd_clk_scaling_update_busy(hba);
7a3e97b0
SY
4836}
4837
9a47ec7c
YG
4838/**
4839 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4840 * @hba: per adapter instance
9333d775
VG
4841 *
4842 * Returns
4843 * IRQ_HANDLED - If interrupt is valid
4844 * IRQ_NONE - If invalid interrupt
9a47ec7c 4845 */
9333d775 4846static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
9a47ec7c
YG
4847{
4848 unsigned long completed_reqs;
4849 u32 tr_doorbell;
4850
4851 /* Resetting interrupt aggregation counters first and reading the
4852 * DOOR_BELL afterward allows us to handle all the completed requests.
4853 * In order to prevent other interrupts starvation the DB is read once
4854 * after reset. The down side of this solution is the possibility of
4855 * false interrupt if device completes another request after resetting
4856 * aggregation and before reading the DB.
4857 */
5ac6abc9
AA
4858 if (ufshcd_is_intr_aggr_allowed(hba) &&
4859 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
9a47ec7c
YG
4860 ufshcd_reset_intr_aggr(hba);
4861
4862 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4863 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4864
9333d775
VG
4865 if (completed_reqs) {
4866 __ufshcd_transfer_req_compl(hba, completed_reqs);
4867 return IRQ_HANDLED;
4868 } else {
4869 return IRQ_NONE;
4870 }
9a47ec7c
YG
4871}
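
/*
 * Illustrative sketch (not part of the original driver): the completion path
 * above derives finished slots by XOR-ing the doorbell with the outstanding
 * bitmap. For example, with outstanding_reqs == 0b1011 and a doorbell
 * reading of 0b0001, slots 1 and 3 (mask 0b1010) have completed.
 */
static unsigned long __maybe_unused
ufshcd_example_completed_mask(unsigned long outstanding_reqs, u32 tr_doorbell)
{
	/* slots whose doorbell bit differs from the outstanding bitmap */
	return (unsigned long)tr_doorbell ^ outstanding_reqs;
}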
4872
66ec6d59
SRT
4873/**
4874 * ufshcd_disable_ee - disable exception event
4875 * @hba: per-adapter instance
4876 * @mask: exception event to disable
4877 *
4878 * Disables exception event in the device so that the EVENT_ALERT
4879 * bit is not set.
4880 *
4881 * Returns zero on success, non-zero error value on failure.
4882 */
4883static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4884{
4885 int err = 0;
4886 u32 val;
4887
4888 if (!(hba->ee_ctrl_mask & mask))
4889 goto out;
4890
4891 val = hba->ee_ctrl_mask & ~mask;
d7e2ddd5 4892 val &= MASK_EE_STATUS;
5e86ae44 4893 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
66ec6d59
SRT
4894 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4895 if (!err)
4896 hba->ee_ctrl_mask &= ~mask;
4897out:
4898 return err;
4899}
4900
4901/**
4902 * ufshcd_enable_ee - enable exception event
4903 * @hba: per-adapter instance
4904 * @mask: exception event to enable
4905 *
4906 * Enable corresponding exception event in the device to allow
4907 * device to alert host in critical scenarios.
4908 *
4909 * Returns zero on success, non-zero error value on failure.
4910 */
4911static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4912{
4913 int err = 0;
4914 u32 val;
4915
4916 if (hba->ee_ctrl_mask & mask)
4917 goto out;
4918
4919 val = hba->ee_ctrl_mask | mask;
d7e2ddd5 4920 val &= MASK_EE_STATUS;
5e86ae44 4921 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
66ec6d59
SRT
4922 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4923 if (!err)
4924 hba->ee_ctrl_mask |= mask;
4925out:
4926 return err;
4927}
4928
4929/**
4930 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4931 * @hba: per-adapter instance
4932 *
4933 * Allow device to manage background operations on its own. Enabling
4934 * this might lead to inconsistent latencies during normal data transfers
4935 * as the device is allowed to manage its own way of handling background
4936 * operations.
4937 *
4938 * Returns zero on success, non-zero on failure.
4939 */
4940static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4941{
4942 int err = 0;
4943
4944 if (hba->auto_bkops_enabled)
4945 goto out;
4946
dc3c8d3a 4947 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
66ec6d59
SRT
4948 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4949 if (err) {
4950 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4951 __func__, err);
4952 goto out;
4953 }
4954
4955 hba->auto_bkops_enabled = true;
7ff5ab47 4956 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
66ec6d59
SRT
4957
4958 /* No need of URGENT_BKOPS exception from the device */
4959 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4960 if (err)
4961 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4962 __func__, err);
4963out:
4964 return err;
4965}
4966
4967/**
 4968 * ufshcd_disable_auto_bkops - block the device from doing background operations
4969 * @hba: per-adapter instance
4970 *
4971 * Disabling background operations improves command response latency but
 4972 * has the drawback of the device moving into a critical state where it is
 4973 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4974 * host is idle so that BKOPS are managed effectively without any negative
4975 * impacts.
4976 *
4977 * Returns zero on success, non-zero on failure.
4978 */
4979static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4980{
4981 int err = 0;
4982
4983 if (!hba->auto_bkops_enabled)
4984 goto out;
4985
4986 /*
4987 * If host assisted BKOPs is to be enabled, make sure
4988 * urgent bkops exception is allowed.
4989 */
4990 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4991 if (err) {
4992 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4993 __func__, err);
4994 goto out;
4995 }
4996
dc3c8d3a 4997 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
66ec6d59
SRT
4998 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4999 if (err) {
5000 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5001 __func__, err);
5002 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5003 goto out;
5004 }
5005
5006 hba->auto_bkops_enabled = false;
7ff5ab47 5007 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
24366c2a 5008 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5009out:
5010 return err;
5011}
5012
5013/**
4e768e76 5014 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
66ec6d59
SRT
5015 * @hba: per adapter instance
5016 *
5017 * After a device reset the device may toggle the BKOPS_EN flag
5018 * to default value. The s/w tracking variables should be updated
4e768e76
SJ
5019 * as well. This function would change the auto-bkops state based on
5020 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
66ec6d59 5021 */
4e768e76 5022static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
66ec6d59 5023{
4e768e76
SJ
5024 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5025 hba->auto_bkops_enabled = false;
5026 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5027 ufshcd_enable_auto_bkops(hba);
5028 } else {
5029 hba->auto_bkops_enabled = true;
5030 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5031 ufshcd_disable_auto_bkops(hba);
5032 }
24366c2a 5033 hba->is_urgent_bkops_lvl_checked = false;
66ec6d59
SRT
5034}
5035
5036static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5037{
5e86ae44 5038 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5039 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5040}
5041
5042/**
57d104c1 5043 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
66ec6d59 5044 * @hba: per-adapter instance
57d104c1 5045 * @status: bkops_status value
66ec6d59 5046 *
57d104c1
SJ
5047 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
5048 * flag in the device to permit background operations if the device
5049 * bkops_status is greater than or equal to "status" argument passed to
5050 * this function, disable otherwise.
5051 *
5052 * Returns 0 for success, non-zero in case of failure.
5053 *
5054 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5055 * to know whether auto bkops is enabled or disabled after this function
5056 * returns control to it.
66ec6d59 5057 */
57d104c1
SJ
5058static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5059 enum bkops_status status)
66ec6d59
SRT
5060{
5061 int err;
57d104c1 5062 u32 curr_status = 0;
66ec6d59 5063
57d104c1 5064 err = ufshcd_get_bkops_status(hba, &curr_status);
66ec6d59
SRT
5065 if (err) {
5066 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5067 __func__, err);
5068 goto out;
57d104c1
SJ
5069 } else if (curr_status > BKOPS_STATUS_MAX) {
5070 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5071 __func__, curr_status);
5072 err = -EINVAL;
5073 goto out;
66ec6d59
SRT
5074 }
5075
57d104c1 5076 if (curr_status >= status)
66ec6d59 5077 err = ufshcd_enable_auto_bkops(hba);
57d104c1
SJ
5078 else
5079 err = ufshcd_disable_auto_bkops(hba);
24366c2a 5080 hba->urgent_bkops_lvl = curr_status;
66ec6d59
SRT
5081out:
5082 return err;
5083}
5084
57d104c1
SJ
5085/**
5086 * ufshcd_urgent_bkops - handle urgent bkops exception event
5087 * @hba: per-adapter instance
5088 *
5089 * Enable fBackgroundOpsEn flag in the device to permit background
5090 * operations.
5091 *
 5092 * If BKOPs is enabled, this function returns 0; 1 if the bkops is not enabled;
 5093 * and a negative error value for any other failure.
5094 */
5095static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5096{
afdfff59 5097 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
57d104c1
SJ
5098}
5099
66ec6d59
SRT
5100static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5101{
5e86ae44 5102 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
66ec6d59
SRT
5103 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5104}
5105
afdfff59
YG
5106static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5107{
5108 int err;
5109 u32 curr_status = 0;
5110
5111 if (hba->is_urgent_bkops_lvl_checked)
5112 goto enable_auto_bkops;
5113
5114 err = ufshcd_get_bkops_status(hba, &curr_status);
5115 if (err) {
5116 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5117 __func__, err);
5118 goto out;
5119 }
5120
5121 /*
5122 * We are seeing that some devices are raising the urgent bkops
5123 * exception events even when the BKOPS status doesn't indicate
5124 * performance impacted or critical. Handle these devices by determining
5125 * their urgent bkops status at runtime.
5126 */
5127 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5128 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5129 __func__, curr_status);
5130 /* update the current status as the urgent bkops level */
5131 hba->urgent_bkops_lvl = curr_status;
5132 hba->is_urgent_bkops_lvl_checked = true;
5133 }
5134
5135enable_auto_bkops:
5136 err = ufshcd_enable_auto_bkops(hba);
5137out:
5138 if (err < 0)
5139 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5140 __func__, err);
5141}
5142
66ec6d59
SRT
5143/**
5144 * ufshcd_exception_event_handler - handle exceptions raised by device
5145 * @work: pointer to work data
5146 *
5147 * Read bExceptionEventStatus attribute from the device and handle the
5148 * exception event accordingly.
5149 */
5150static void ufshcd_exception_event_handler(struct work_struct *work)
5151{
5152 struct ufs_hba *hba;
5153 int err;
5154 u32 status = 0;
5155 hba = container_of(work, struct ufs_hba, eeh_work);
5156
62694735 5157 pm_runtime_get_sync(hba->dev);
2e3611e9 5158 scsi_block_requests(hba->host);
66ec6d59
SRT
5159 err = ufshcd_get_ee_status(hba, &status);
5160 if (err) {
5161 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5162 __func__, err);
5163 goto out;
5164 }
5165
5166 status &= hba->ee_ctrl_mask;
afdfff59
YG
5167
5168 if (status & MASK_EE_URGENT_BKOPS)
5169 ufshcd_bkops_exception_event_handler(hba);
5170
66ec6d59 5171out:
2e3611e9 5172 scsi_unblock_requests(hba->host);
62694735 5173 pm_runtime_put_sync(hba->dev);
66ec6d59
SRT
5174 return;
5175}
5176
9a47ec7c
YG
5177/* Complete requests that have door-bell cleared */
5178static void ufshcd_complete_requests(struct ufs_hba *hba)
5179{
5180 ufshcd_transfer_req_compl(hba);
5181 ufshcd_tmc_handler(hba);
5182}
5183
583fa62d
YG
5184/**
5185 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
5186 * is required to recover from the DL NAC errors.
5187 * @hba: per-adapter instance
5188 *
5189 * Returns true if error handling is required, false otherwise
5190 */
5191static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5192{
5193 unsigned long flags;
5194 bool err_handling = true;
5195
5196 spin_lock_irqsave(hba->host->host_lock, flags);
5197 /*
5198 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5199 * device fatal error and/or DL NAC & REPLAY timeout errors.
5200 */
5201 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5202 goto out;
5203
5204 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5205 ((hba->saved_err & UIC_ERROR) &&
5206 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5207 goto out;
5208
5209 if ((hba->saved_err & UIC_ERROR) &&
5210 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5211 int err;
5212 /*
5213 * wait for 50ms to see if we can get any other errors or not.
5214 */
5215 spin_unlock_irqrestore(hba->host->host_lock, flags);
5216 msleep(50);
5217 spin_lock_irqsave(hba->host->host_lock, flags);
5218
5219 /*
5220 * now check if we have got any other severe errors other than
5221 * the DL NAC error.
5222 */
5223 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5224 ((hba->saved_err & UIC_ERROR) &&
5225 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5226 goto out;
5227
5228 /*
5229 * As DL NAC is the only error received so far, send out NOP
5230 * command to confirm if link is still active or not.
5231 * - If we don't get any response then do error recovery.
5232 * - If we get response then clear the DL NAC error bit.
5233 */
5234
5235 spin_unlock_irqrestore(hba->host->host_lock, flags);
5236 err = ufshcd_verify_dev_init(hba);
5237 spin_lock_irqsave(hba->host->host_lock, flags);
5238
5239 if (err)
5240 goto out;
5241
5242 /* Link seems to be alive hence ignore the DL NAC errors */
5243 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5244 hba->saved_err &= ~UIC_ERROR;
5245 /* clear NAC error */
5246 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5247 if (!hba->saved_uic_err) {
5248 err_handling = false;
5249 goto out;
5250 }
5251 }
5252out:
5253 spin_unlock_irqrestore(hba->host->host_lock, flags);
5254 return err_handling;
5255}
5256
7a3e97b0 5257/**
e8e7f271
SRT
5258 * ufshcd_err_handler - handle UFS errors that require s/w attention
5259 * @work: pointer to work structure
7a3e97b0 5260 */
e8e7f271 5261static void ufshcd_err_handler(struct work_struct *work)
7a3e97b0
SY
5262{
5263 struct ufs_hba *hba;
e8e7f271
SRT
5264 unsigned long flags;
5265 u32 err_xfer = 0;
5266 u32 err_tm = 0;
5267 int err = 0;
5268 int tag;
9a47ec7c 5269 bool needs_reset = false;
e8e7f271
SRT
5270
5271 hba = container_of(work, struct ufs_hba, eh_work);
7a3e97b0 5272
62694735 5273 pm_runtime_get_sync(hba->dev);
1ab27c9c 5274 ufshcd_hold(hba, false);
e8e7f271
SRT
5275
5276 spin_lock_irqsave(hba->host->host_lock, flags);
9a47ec7c 5277 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
e8e7f271 5278 goto out;
e8e7f271
SRT
5279
5280 hba->ufshcd_state = UFSHCD_STATE_RESET;
5281 ufshcd_set_eh_in_progress(hba);
5282
5283 /* Complete requests that have door-bell cleared by h/w */
9a47ec7c 5284 ufshcd_complete_requests(hba);
583fa62d
YG
5285
5286 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5287 bool ret;
5288
5289 spin_unlock_irqrestore(hba->host->host_lock, flags);
5290 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5291 ret = ufshcd_quirk_dl_nac_errors(hba);
5292 spin_lock_irqsave(hba->host->host_lock, flags);
5293 if (!ret)
5294 goto skip_err_handling;
5295 }
9a47ec7c 5296 if ((hba->saved_err & INT_FATAL_ERRORS) ||
82174440 5297 (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
9a47ec7c
YG
5298 ((hba->saved_err & UIC_ERROR) &&
5299 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5300 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5301 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5302 needs_reset = true;
e8e7f271 5303
9a47ec7c
YG
5304 /*
5305 * if host reset is required then skip clearing the pending
2df74b69
CG
5306 * transfers forcefully because they will get cleared during
5307 * host reset and restore
9a47ec7c
YG
5308 */
5309 if (needs_reset)
5310 goto skip_pending_xfer_clear;
5311
5312 /* release lock as clear command might sleep */
5313 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 5314 /* Clear pending transfer requests */
9a47ec7c
YG
5315 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5316 if (ufshcd_clear_cmd(hba, tag)) {
5317 err_xfer = true;
5318 goto lock_skip_pending_xfer_clear;
5319 }
5320 }
e8e7f271
SRT
5321
5322 /* Clear pending task management requests */
9a47ec7c
YG
5323 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5324 if (ufshcd_clear_tm_cmd(hba, tag)) {
5325 err_tm = true;
5326 goto lock_skip_pending_xfer_clear;
5327 }
5328 }
e8e7f271 5329
9a47ec7c 5330lock_skip_pending_xfer_clear:
e8e7f271 5331 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 5332
9a47ec7c
YG
5333 /* Complete the requests that are cleared by s/w */
5334 ufshcd_complete_requests(hba);
5335
5336 if (err_xfer || err_tm)
5337 needs_reset = true;
5338
5339skip_pending_xfer_clear:
e8e7f271 5340 /* Fatal errors need reset */
9a47ec7c
YG
5341 if (needs_reset) {
5342 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5343
5344 /*
5345 * ufshcd_reset_and_restore() does the link reinitialization
5346 * which will need at least one empty doorbell slot to send the
5347 * device management commands (NOP and query commands).
5348 * If there is no slot empty at this moment then free up last
5349 * slot forcefully.
5350 */
5351 if (hba->outstanding_reqs == max_doorbells)
5352 __ufshcd_transfer_req_compl(hba,
5353 (1UL << (hba->nutrs - 1)));
5354
5355 spin_unlock_irqrestore(hba->host->host_lock, flags);
e8e7f271 5356 err = ufshcd_reset_and_restore(hba);
9a47ec7c 5357 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271
SRT
5358 if (err) {
5359 dev_err(hba->dev, "%s: reset and restore failed\n",
5360 __func__);
5361 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5362 }
5363 /*
5364 * Inform scsi mid-layer that we did reset and allow to handle
5365 * Unit Attention properly.
5366 */
5367 scsi_report_bus_reset(hba->host, 0);
5368 hba->saved_err = 0;
5369 hba->saved_uic_err = 0;
5370 }
9a47ec7c 5371
583fa62d 5372skip_err_handling:
9a47ec7c
YG
5373 if (!needs_reset) {
5374 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5375 if (hba->saved_err || hba->saved_uic_err)
5376 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5377 __func__, hba->saved_err, hba->saved_uic_err);
5378 }
5379
e8e7f271
SRT
5380 ufshcd_clear_eh_in_progress(hba);
5381
5382out:
9a47ec7c 5383 spin_unlock_irqrestore(hba->host->host_lock, flags);
38135535 5384 ufshcd_scsi_unblock_requests(hba);
1ab27c9c 5385 ufshcd_release(hba);
62694735 5386 pm_runtime_put_sync(hba->dev);
7a3e97b0
SY
5387}
5388
5389/**
e8e7f271
SRT
5390 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5391 * @hba: per-adapter instance
9333d775
VG
5392 *
5393 * Returns
5394 * IRQ_HANDLED - If interrupt is valid
5395 * IRQ_NONE - If invalid interrupt
7a3e97b0 5396 */
9333d775 5397static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
7a3e97b0
SY
5398{
5399 u32 reg;
9333d775 5400 irqreturn_t retval = IRQ_NONE;
7a3e97b0 5401
fb7b45f0
DR
5402 /* PHY layer lane error */
5403 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5404 /* Ignore LINERESET indication, as this is not an error */
5405 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
9333d775 5406 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
fb7b45f0
DR
5407 /*
5408 * To know whether this error is fatal or not, DB timeout
5409 * must be checked but this error is handled separately.
5410 */
5411 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
48d5b973 5412 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
9333d775 5413 retval |= IRQ_HANDLED;
ff8e20c6 5414 }
fb7b45f0 5415
e8e7f271
SRT
5416 /* PA_INIT_ERROR is fatal and needs UIC reset */
5417 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
9333d775
VG
5418 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
5419 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
48d5b973 5420 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
ff8e20c6 5421
9333d775
VG
5422 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5423 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5424 else if (hba->dev_quirks &
5425 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5426 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5427 hba->uic_error |=
5428 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5429 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5430 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5431 }
5432 retval |= IRQ_HANDLED;
583fa62d 5433 }
e8e7f271
SRT
5434
5435 /* UIC NL/TL/DME errors needs software retry */
5436 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
9333d775
VG
5437 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
5438 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
48d5b973 5439 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
e8e7f271 5440 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
9333d775 5441 retval |= IRQ_HANDLED;
ff8e20c6 5442 }
e8e7f271
SRT
5443
5444 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
9333d775
VG
5445 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
5446 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
48d5b973 5447 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
e8e7f271 5448 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
9333d775 5449 retval |= IRQ_HANDLED;
ff8e20c6 5450 }
e8e7f271
SRT
5451
5452 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
9333d775
VG
5453 if ((reg & UIC_DME_ERROR) &&
5454 (reg & UIC_DME_ERROR_CODE_MASK)) {
48d5b973 5455 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
e8e7f271 5456 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
9333d775 5457 retval |= IRQ_HANDLED;
ff8e20c6 5458 }
e8e7f271
SRT
5459
5460 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5461 __func__, hba->uic_error);
9333d775 5462 return retval;
e8e7f271
SRT
5463}
5464
82174440
SC
5465static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5466 u32 intr_mask)
5467{
5468 if (!ufshcd_is_auto_hibern8_supported(hba))
5469 return false;
5470
5471 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5472 return false;
5473
5474 if (hba->active_uic_cmd &&
5475 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5476 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5477 return false;
5478
5479 return true;
5480}
5481
e8e7f271
SRT
5482/**
5483 * ufshcd_check_errors - Check for errors that need s/w attention
5484 * @hba: per-adapter instance
9333d775
VG
5485 *
5486 * Returns
5487 * IRQ_HANDLED - If interrupt is valid
5488 * IRQ_NONE - If invalid interrupt
e8e7f271 5489 */
9333d775 5490static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
e8e7f271
SRT
5491{
5492 bool queue_eh_work = false;
9333d775 5493 irqreturn_t retval = IRQ_NONE;
e8e7f271 5494
d3c615bf
SC
5495 if (hba->errors & INT_FATAL_ERRORS) {
5496 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
e8e7f271 5497 queue_eh_work = true;
d3c615bf 5498 }
7a3e97b0
SY
5499
5500 if (hba->errors & UIC_ERROR) {
e8e7f271 5501 hba->uic_error = 0;
9333d775 5502 retval = ufshcd_update_uic_error(hba);
e8e7f271
SRT
5503 if (hba->uic_error)
5504 queue_eh_work = true;
7a3e97b0 5505 }
e8e7f271 5506
82174440
SC
5507 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5508 dev_err(hba->dev,
5509 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5510 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5511 "Enter" : "Exit",
5512 hba->errors, ufshcd_get_upmcrs(hba));
d3c615bf
SC
5513 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5514 hba->errors);
82174440
SC
5515 queue_eh_work = true;
5516 }
5517
e8e7f271 5518 if (queue_eh_work) {
9a47ec7c
YG
5519 /*
5520 * update the transfer error masks to sticky bits, let's do this
5521 * irrespective of current ufshcd_state.
5522 */
5523 hba->saved_err |= hba->errors;
5524 hba->saved_uic_err |= hba->uic_error;
5525
e8e7f271
SRT
5526 /* handle fatal errors only when link is functional */
5527 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5528 /* block commands from scsi mid-layer */
38135535 5529 ufshcd_scsi_block_requests(hba);
e8e7f271 5530
141f8165 5531 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
66cc820f
DR
5532
5533 /* dump controller state before resetting */
5534 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5535 bool pr_prdt = !!(hba->saved_err &
5536 SYSTEM_BUS_FATAL_ERROR);
5537
5538 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5539 __func__, hba->saved_err,
5540 hba->saved_uic_err);
5541
5542 ufshcd_print_host_regs(hba);
5543 ufshcd_print_pwr_info(hba);
5544 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5545 ufshcd_print_trs(hba, hba->outstanding_reqs,
5546 pr_prdt);
5547 }
e8e7f271
SRT
5548 schedule_work(&hba->eh_work);
5549 }
9333d775 5550 retval |= IRQ_HANDLED;
3441da7d 5551 }
e8e7f271
SRT
5552 /*
5553 * if (!queue_eh_work) -
5554 * Other errors are either non-fatal where host recovers
5555 * itself without s/w intervention or errors that will be
5556 * handled by the SCSI core layer.
5557 */
9333d775 5558 return retval;
7a3e97b0
SY
5559}
5560
69a6c269
BVA
5561struct ctm_info {
5562 struct ufs_hba *hba;
5563 unsigned long pending;
5564 unsigned int ncpl;
5565};
5566
5567static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
5568{
5569 struct ctm_info *const ci = priv;
5570 struct completion *c;
5571
5572 WARN_ON_ONCE(reserved);
5573 if (test_bit(req->tag, &ci->pending))
5574 return true;
5575 ci->ncpl++;
5576 c = req->end_io_data;
5577 if (c)
5578 complete(c);
5579 return true;
5580}
5581
7a3e97b0
SY
5582/**
5583 * ufshcd_tmc_handler - handle task management function completion
5584 * @hba: per adapter instance
9333d775
VG
5585 *
5586 * Returns
5587 * IRQ_HANDLED - If interrupt is valid
5588 * IRQ_NONE - If invalid interrupt
7a3e97b0 5589 */
9333d775 5590static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
7a3e97b0 5591{
69a6c269
BVA
5592 struct request_queue *q = hba->tmf_queue;
5593 struct ctm_info ci = {
5594 .hba = hba,
5595 .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
5596 };
7a3e97b0 5597
69a6c269
BVA
5598 blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
5599 return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
7a3e97b0
SY
5600}
5601
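/*
 * Editor's worked example (illustrative only): if REG_UTP_TASK_REQ_DOOR_BELL
 * reads 0x5, task tags 0 and 2 are still owned by the controller, so
 * ufshcd_compl_tm() skips them; every other in-flight reserved TM tag is
 * counted in ci.ncpl and, if it has a waiter (req->end_io_data), its
 * completion is signalled. The handler reports IRQ_HANDLED only if ci.ncpl
 * is non-zero.
 */
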
5602/**
5603 * ufshcd_sl_intr - Interrupt service routine
5604 * @hba: per adapter instance
5605 * @intr_status: contains interrupts generated by the controller
9333d775
VG
5606 *
5607 * Returns
5608 * IRQ_HANDLED - If interrupt is valid
5609 * IRQ_NONE - If invalid interrupt
7a3e97b0 5610 */
9333d775 5611static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
7a3e97b0 5612{
9333d775
VG
5613 irqreturn_t retval = IRQ_NONE;
5614
7a3e97b0 5615 hba->errors = UFSHCD_ERROR_MASK & intr_status;
82174440
SC
5616
5617 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5618 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5619
7a3e97b0 5620 if (hba->errors)
9333d775 5621 retval |= ufshcd_check_errors(hba);
7a3e97b0 5622
53b3d9c3 5623 if (intr_status & UFSHCD_UIC_MASK)
9333d775 5624 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
7a3e97b0
SY
5625
5626 if (intr_status & UTP_TASK_REQ_COMPL)
9333d775 5627 retval |= ufshcd_tmc_handler(hba);
7a3e97b0
SY
5628
5629 if (intr_status & UTP_TRANSFER_REQ_COMPL)
9333d775
VG
5630 retval |= ufshcd_transfer_req_compl(hba);
5631
5632 return retval;
7a3e97b0
SY
5633}
5634
5635/**
5636 * ufshcd_intr - Main interrupt service routine
5637 * @irq: irq number
5638 * @__hba: pointer to adapter instance
5639 *
9333d775
VG
5640 * Returns
5641 * IRQ_HANDLED - If interrupt is valid
5642 * IRQ_NONE - If invalid interrupt
7a3e97b0
SY
5643 */
5644static irqreturn_t ufshcd_intr(int irq, void *__hba)
5645{
d75f7fe4 5646 u32 intr_status, enabled_intr_status;
7a3e97b0
SY
5647 irqreturn_t retval = IRQ_NONE;
5648 struct ufs_hba *hba = __hba;
7f6ba4f1 5649 int retries = hba->nutrs;
7a3e97b0
SY
5650
5651 spin_lock(hba->host->host_lock);
b873a275 5652 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7a3e97b0 5653
7f6ba4f1
VG
5654 /*
5655 * There could be a max of hba->nutrs reqs in flight and, in the worst
5656 * case, the reqs may finish one by one after the interrupt status is
5657 * read. Make sure we handle them by checking the interrupt status
5658 * again in a loop until all of the reqs are processed before returning.
5659 */
5660 do {
5661 enabled_intr_status =
5662 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5663 if (intr_status)
5664 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
9333d775
VG
5665 if (enabled_intr_status)
5666 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7f6ba4f1
VG
5667
5668 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5669 } while (intr_status && --retries);
d75f7fe4 5670
9333d775
VG
5671 if (retval == IRQ_NONE) {
5672 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
5673 __func__, intr_status);
5674 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
5675 }
5676
7a3e97b0
SY
5677 spin_unlock(hba->host->host_lock);
5678 return retval;
5679}
5680
e2933132
SRT
5681static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5682{
5683 int err = 0;
5684 u32 mask = 1 << tag;
5685 unsigned long flags;
5686
5687 if (!test_bit(tag, &hba->outstanding_tasks))
5688 goto out;
5689
5690 spin_lock_irqsave(hba->host->host_lock, flags);
1399c5b0 5691 ufshcd_utmrl_clear(hba, tag);
e2933132
SRT
5692 spin_unlock_irqrestore(hba->host->host_lock, flags);
5693
5694 /* poll for max. 1 sec to clear door bell register by h/w */
5695 err = ufshcd_wait_for_register(hba,
5696 REG_UTP_TASK_REQ_DOOR_BELL,
596585a2 5697 mask, 0, 1000, 1000, true);
e2933132
SRT
5698out:
5699 return err;
5700}
5701
c6049cd9
CH
5702static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5703 struct utp_task_req_desc *treq, u8 tm_function)
7a3e97b0 5704{
69a6c269 5705 struct request_queue *q = hba->tmf_queue;
c6049cd9 5706 struct Scsi_Host *host = hba->host;
69a6c269
BVA
5707 DECLARE_COMPLETION_ONSTACK(wait);
5708 struct request *req;
7a3e97b0 5709 unsigned long flags;
c6049cd9 5710 int free_slot, task_tag, err;
7a3e97b0 5711
e2933132
SRT
5712 /*
5713 * Get a free slot; the reserved-tag allocation in blk_get_request()
5714 * sleeps if no slot is available. Even though it may sleep indefinitely,
5715 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5716 */
69a6c269
BVA
5717 req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
5718 req->end_io_data = &wait;
5719 free_slot = req->tag;
5720 WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
1ab27c9c 5721 ufshcd_hold(hba, false);
7a3e97b0 5722
e2933132 5723 spin_lock_irqsave(host->host_lock, flags);
e2933132 5724 task_tag = hba->nutrs + free_slot;
7a3e97b0 5725
c6049cd9
CH
5726 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5727
5728 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
d2877be4
KK
5729 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5730
7a3e97b0
SY
5731 /* send command to the controller */
5732 __set_bit(free_slot, &hba->outstanding_tasks);
897efe62
YG
5733
5734 /* Make sure descriptors are ready before ringing the task doorbell */
5735 wmb();
5736
b873a275 5737 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
ad1a1b9c
GB
5738 /* Make sure that doorbell is committed immediately */
5739 wmb();
7a3e97b0
SY
5740
5741 spin_unlock_irqrestore(host->host_lock, flags);
5742
6667e6d9
OS
5743 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5744
7a3e97b0 5745 /* wait until the task management command is completed */
69a6c269 5746 err = wait_for_completion_io_timeout(&wait,
e2933132 5747 msecs_to_jiffies(TM_CMD_TIMEOUT));
7a3e97b0 5748 if (!err) {
69a6c269
BVA
5749 /*
5750 * Make sure that ufshcd_compl_tm() does not trigger a
5751 * use-after-free.
5752 */
5753 req->end_io_data = NULL;
6667e6d9 5754 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
e2933132
SRT
5755 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5756 __func__, tm_function);
5757 if (ufshcd_clear_tm_cmd(hba, free_slot))
5758 dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
5759 __func__, free_slot);
5760 err = -ETIMEDOUT;
5761 } else {
c6049cd9
CH
5762 err = 0;
5763 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5764
6667e6d9 5765 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
7a3e97b0 5766 }
e2933132 5767
b557217c
SC
5768 spin_lock_irqsave(hba->host->host_lock, flags);
5769 __clear_bit(free_slot, &hba->outstanding_tasks);
5770 spin_unlock_irqrestore(hba->host->host_lock, flags);
5771
69a6c269 5772 blk_put_request(req);
e2933132 5773
1ab27c9c 5774 ufshcd_release(hba);
7a3e97b0
SY
5775 return err;
5776}
5777
c6049cd9
CH
5778/**
5779 * ufshcd_issue_tm_cmd - issues task management commands to controller
5780 * @hba: per adapter instance
5781 * @lun_id: LUN ID to which TM command is sent
5782 * @task_id: task ID to which the TM command is applicable
5783 * @tm_function: task management function opcode
5784 * @tm_response: task management service response return value
5785 *
5786 * Returns non-zero value on error, zero on success.
5787 */
5788static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5789 u8 tm_function, u8 *tm_response)
5790{
5791 struct utp_task_req_desc treq = { { 0 }, };
5792 int ocs_value, err;
5793
5794 /* Configure task request descriptor */
5795 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5796 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5797
5798 /* Configure task request UPIU */
5799 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
5800 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
5801 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
5802
5803 /*
5804 * The host shall provide the same value for LUN field in the basic
5805 * header and for Input Parameter.
5806 */
5807 treq.input_param1 = cpu_to_be32(lun_id);
5808 treq.input_param2 = cpu_to_be32(task_id);
5809
5810 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
5811 if (err == -ETIMEDOUT)
5812 return err;
5813
5814 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5815 if (ocs_value != OCS_SUCCESS)
5816 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5817 __func__, ocs_value);
5818 else if (tm_response)
5819 *tm_response = be32_to_cpu(treq.output_param1) &
5820 MASK_TM_SERVICE_RESP;
5821 return err;
5822}
5823
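/*
 * Editor's illustrative sketch (not part of the driver): a typical caller
 * issues ABORT TASK for a given LUN/tag and then inspects the task
 * management service response, much as ufshcd_abort() does further below.
 * The wrapper name is invented for illustration only.
 */
static int example_abort_one_task(struct ufs_hba *hba, int lun, int task_tag)
{
	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lun, task_tag, UFS_ABORT_TASK, &resp);
	if (err)
		return err;

	/* A non-COMPL service response means the device rejected the TM fn */
	return (resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) ? 0 : -EIO;
}
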
5e0a86ee
AA
5824/**
5825 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
5826 * @hba: per-adapter instance
5827 * @req_upiu: upiu request
5828 * @rsp_upiu: upiu reply
5e0a86ee
AA
5829 * @desc_buff: pointer to descriptor buffer, NULL if NA
5830 * @buff_len: descriptor size, 0 if NA
d0e9760d 5831 * @cmd_type: specifies the type (NOP, Query...)
5e0a86ee
AA
5832 * @desc_op: descriptor operation
5833 *
5834 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
5835 * Therefore, they "ride" the device management infrastructure: they use its
5836 * tag and its task work queues.
5837 *
5838 * Since there is only one available tag for device management commands,
5839 * the caller is expected to hold the hba->dev_cmd.lock mutex.
5840 */
5841static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5842 struct utp_upiu_req *req_upiu,
5843 struct utp_upiu_req *rsp_upiu,
5844 u8 *desc_buff, int *buff_len,
7f674c38 5845 enum dev_cmd_type cmd_type,
5e0a86ee
AA
5846 enum query_opcode desc_op)
5847{
7252a360
BVA
5848 struct request_queue *q = hba->cmd_queue;
5849 struct request *req;
5e0a86ee
AA
5850 struct ufshcd_lrb *lrbp;
5851 int err = 0;
5852 int tag;
5853 struct completion wait;
5854 unsigned long flags;
5855 u32 upiu_flags;
5856
5857 down_read(&hba->clk_scaling_lock);
5858
7252a360 5859 req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
bb14dd15
DC
5860 if (IS_ERR(req)) {
5861 err = PTR_ERR(req);
5862 goto out_unlock;
5863 }
7252a360
BVA
5864 tag = req->tag;
5865 WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
5e0a86ee
AA
5866
5867 init_completion(&wait);
5868 lrbp = &hba->lrb[tag];
5869 WARN_ON(lrbp->cmd);
5870
5871 lrbp->cmd = NULL;
5872 lrbp->sense_bufflen = 0;
5873 lrbp->sense_buffer = NULL;
5874 lrbp->task_tag = tag;
5875 lrbp->lun = 0;
5876 lrbp->intr_cmd = true;
5877 hba->dev_cmd.type = cmd_type;
5878
5879 switch (hba->ufs_version) {
5880 case UFSHCI_VERSION_10:
5881 case UFSHCI_VERSION_11:
5882 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
5883 break;
5884 default:
5885 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5886 break;
5887 }
5888
5889 /* update the task tag in the request upiu */
5890 req_upiu->header.dword_0 |= cpu_to_be32(tag);
5891
5892 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
5893
5894 /* just copy the upiu request as it is */
5895 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
5896 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
5897 /* The Data Segment Area is optional depending upon the query
5898 * function value. for WRITE DESCRIPTOR, the data segment
5899 * follows right after the tsf.
5900 */
5901 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
5902 *buff_len = 0;
5903 }
5904
5905 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5906
5907 hba->dev_cmd.complete = &wait;
5908
5909 /* Make sure descriptors are ready before ringing the doorbell */
5910 wmb();
5911 spin_lock_irqsave(hba->host->host_lock, flags);
5912 ufshcd_send_command(hba, tag);
5913 spin_unlock_irqrestore(hba->host->host_lock, flags);
5914
5915 /*
5916 * Ignore the return value here - ufshcd_check_query_response() is
5917 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
5918 * Read the response directly, ignoring all errors.
5919 */
5920 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
5921
5922 /* just copy the upiu response as it is */
5923 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
4bbbe242
AA
5924 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
5925 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
5926 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
5927 MASK_QUERY_DATA_SEG_LEN;
5928
5929 if (*buff_len >= resp_len) {
5930 memcpy(desc_buff, descp, resp_len);
5931 *buff_len = resp_len;
5932 } else {
3d4881d1
BH
5933 dev_warn(hba->dev,
5934 "%s: rsp size %d is bigger than buffer size %d",
5935 __func__, resp_len, *buff_len);
4bbbe242
AA
5936 *buff_len = 0;
5937 err = -EINVAL;
5938 }
5939 }
5e0a86ee 5940
7252a360 5941 blk_put_request(req);
bb14dd15 5942out_unlock:
5e0a86ee
AA
5943 up_read(&hba->clk_scaling_lock);
5944 return err;
5945}
5946
5947/**
5948 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
5949 * @hba: per-adapter instance
5950 * @req_upiu: upiu request
5951 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
5952 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
5953 * @desc_buff: pointer to descriptor buffer, NULL if NA
5954 * @buff_len: descriptor size, 0 if NA
5955 * @desc_op: descriptor operation
5956 *
5957 * Supports UTP Transfer requests (nop and query), and UTP Task
5958 * Management requests.
5959 * It is up to the caller to fill the upiu content properly, as it will
5960 * be copied without any further input validation.
5961 */
5962int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
5963 struct utp_upiu_req *req_upiu,
5964 struct utp_upiu_req *rsp_upiu,
5965 int msgcode,
5966 u8 *desc_buff, int *buff_len,
5967 enum query_opcode desc_op)
5968{
5969 int err;
7f674c38 5970 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
5e0a86ee
AA
5971 struct utp_task_req_desc treq = { { 0 }, };
5972 int ocs_value;
5973 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
5974
5e0a86ee
AA
5975 switch (msgcode) {
5976 case UPIU_TRANSACTION_NOP_OUT:
5977 cmd_type = DEV_CMD_TYPE_NOP;
5978 /* fall through */
5979 case UPIU_TRANSACTION_QUERY_REQ:
5980 ufshcd_hold(hba, false);
5981 mutex_lock(&hba->dev_cmd.lock);
5982 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
5983 desc_buff, buff_len,
5984 cmd_type, desc_op);
5985 mutex_unlock(&hba->dev_cmd.lock);
5986 ufshcd_release(hba);
5987
5988 break;
5989 case UPIU_TRANSACTION_TASK_REQ:
5990 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5991 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5992
5993 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
5994
5995 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
5996 if (err == -ETIMEDOUT)
5997 break;
5998
5999 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6000 if (ocs_value != OCS_SUCCESS) {
6001 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6002 ocs_value);
6003 break;
6004 }
6005
6006 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6007
6008 break;
6009 default:
6010 err = -EINVAL;
6011
6012 break;
6013 }
6014
5e0a86ee
AA
6015 return err;
6016}
6017
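/*
 * Editor's illustrative sketch (not part of the driver): a hypothetical
 * caller (the BSG layer is the real user) might issue a raw NOP OUT roughly
 * as below. The header encoding is schematic - the transaction code sits in
 * the top byte of dword_0, matching the convention used for TM requests
 * above; see ufs.h/ufs_bsg.c for the authoritative layout.
 */
static int example_send_raw_nop(struct ufs_hba *hba)
{
	struct utp_upiu_req req_upiu = {};
	struct utp_upiu_req rsp_upiu = {};
	int buff_len = 0;

	req_upiu.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);

	return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, &buff_len,
					UPIU_QUERY_OPCODE_NOP);
}
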
7a3e97b0 6018/**
3441da7d
SRT
6019 * ufshcd_eh_device_reset_handler - device reset handler registered to
6020 * scsi layer.
7a3e97b0
SY
6021 * @cmd: SCSI command pointer
6022 *
6023 * Returns SUCCESS/FAILED
6024 */
3441da7d 6025static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7a3e97b0
SY
6026{
6027 struct Scsi_Host *host;
6028 struct ufs_hba *hba;
6029 unsigned int tag;
6030 u32 pos;
6031 int err;
e2933132
SRT
6032 u8 resp = 0xF;
6033 struct ufshcd_lrb *lrbp;
3441da7d 6034 unsigned long flags;
7a3e97b0
SY
6035
6036 host = cmd->device->host;
6037 hba = shost_priv(host);
6038 tag = cmd->request->tag;
6039
e2933132
SRT
6040 lrbp = &hba->lrb[tag];
6041 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
6042 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3441da7d
SRT
6043 if (!err)
6044 err = resp;
7a3e97b0 6045 goto out;
e2933132 6046 }
7a3e97b0 6047
3441da7d
SRT
6048 /* clear the commands that were pending for corresponding LUN */
6049 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
6050 if (hba->lrb[pos].lun == lrbp->lun) {
6051 err = ufshcd_clear_cmd(hba, pos);
6052 if (err)
6053 break;
7a3e97b0 6054 }
3441da7d
SRT
6055 }
6056 spin_lock_irqsave(host->host_lock, flags);
6057 ufshcd_transfer_req_compl(hba);
6058 spin_unlock_irqrestore(host->host_lock, flags);
7fabb77b 6059
7a3e97b0 6060out:
7fabb77b 6061 hba->req_abort_count = 0;
8808b4e9 6062 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
3441da7d
SRT
6063 if (!err) {
6064 err = SUCCESS;
6065 } else {
6066 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6067 err = FAILED;
6068 }
7a3e97b0
SY
6069 return err;
6070}
6071
e0b299e3
GB
6072static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
6073{
6074 struct ufshcd_lrb *lrbp;
6075 int tag;
6076
6077 for_each_set_bit(tag, &bitmap, hba->nutrs) {
6078 lrbp = &hba->lrb[tag];
6079 lrbp->req_abort_skip = true;
6080 }
6081}
6082
7a3e97b0
SY
6083/**
6084 * ufshcd_abort - abort a specific command
6085 * @cmd: SCSI command pointer
6086 *
f20810d8
SRT
6087 * Abort the pending command in the device by sending a UFS_ABORT_TASK task
6088 * management command, and in the host controller by clearing the door-bell
6089 * register. There can be a race where the controller is sending the command
6090 * to the device while the abort is issued. To avoid that, first issue
6091 * UFS_QUERY_TASK to check if the command was really issued, then try to abort it.
6092 *
7a3e97b0
SY
6093 * Returns SUCCESS/FAILED
6094 */
6095static int ufshcd_abort(struct scsi_cmnd *cmd)
6096{
6097 struct Scsi_Host *host;
6098 struct ufs_hba *hba;
6099 unsigned long flags;
6100 unsigned int tag;
f20810d8
SRT
6101 int err = 0;
6102 int poll_cnt;
e2933132
SRT
6103 u8 resp = 0xF;
6104 struct ufshcd_lrb *lrbp;
e9d501b1 6105 u32 reg;
7a3e97b0
SY
6106
6107 host = cmd->device->host;
6108 hba = shost_priv(host);
6109 tag = cmd->request->tag;
e7d38257 6110 lrbp = &hba->lrb[tag];
14497328
YG
6111 if (!ufshcd_valid_tag(hba, tag)) {
6112 dev_err(hba->dev,
6113 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6114 __func__, tag, cmd, cmd->request);
6115 BUG();
6116 }
7a3e97b0 6117
e7d38257
DR
6118 /*
6119 * Task abort to the device W-LUN is illegal. When this command
6120 * will fail, due to spec violation, scsi err handling next step
6121 * will be to send LU reset which, again, is a spec violation.
6122 * To avoid these unnecessary/illegal step we skip to the last error
6123 * handling stage: reset and restore.
6124 */
6125 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6126 return ufshcd_eh_host_reset_handler(cmd);
6127
1ab27c9c 6128 ufshcd_hold(hba, false);
14497328 6129 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
f20810d8 6130 /* If command is already aborted/completed, return SUCCESS */
14497328
YG
6131 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6132 dev_err(hba->dev,
6133 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6134 __func__, tag, hba->outstanding_reqs, reg);
f20810d8 6135 goto out;
14497328 6136 }
7a3e97b0 6137
e9d501b1
DR
6138 if (!(reg & (1 << tag))) {
6139 dev_err(hba->dev,
6140 "%s: cmd was completed, but without a notifying intr, tag = %d",
6141 __func__, tag);
6142 }
6143
66cc820f
DR
6144 /* Print Transfer Request of aborted task */
6145 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
66cc820f 6146
7fabb77b
GB
6147 /*
6148 * Print detailed info about aborted request.
6149 * As more than one request might get aborted at the same time,
6150 * print full information only for the first aborted request in order
6151 * to reduce repeated printouts. For other aborted requests only print
6152 * basic details.
6153 */
6154 scsi_print_command(hba->lrb[tag].cmd);
6155 if (!hba->req_abort_count) {
8808b4e9 6156 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
7fabb77b 6157 ufshcd_print_host_regs(hba);
6ba65588 6158 ufshcd_print_host_state(hba);
7fabb77b
GB
6159 ufshcd_print_pwr_info(hba);
6160 ufshcd_print_trs(hba, 1 << tag, true);
6161 } else {
6162 ufshcd_print_trs(hba, 1 << tag, false);
6163 }
6164 hba->req_abort_count++;
e0b299e3
GB
6165
6166 /* Skip task abort in case previous aborts failed and report failure */
6167 if (lrbp->req_abort_skip) {
6168 err = -EIO;
6169 goto out;
6170 }
6171
f20810d8
SRT
6172 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6173 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6174 UFS_QUERY_TASK, &resp);
6175 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6176 /* cmd pending in the device */
ff8e20c6
DR
6177 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6178 __func__, tag);
f20810d8
SRT
6179 break;
6180 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
f20810d8
SRT
6181 /*
6182 * cmd not pending in the device, check if it is
6183 * in transition.
6184 */
ff8e20c6
DR
6185 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6186 __func__, tag);
f20810d8
SRT
6187 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6188 if (reg & (1 << tag)) {
6189 /* sleep for max. 200us to stabilize */
6190 usleep_range(100, 200);
6191 continue;
6192 }
6193 /* command completed already */
ff8e20c6
DR
6194 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6195 __func__, tag);
f20810d8
SRT
6196 goto out;
6197 } else {
ff8e20c6
DR
6198 dev_err(hba->dev,
6199 "%s: no response from device. tag = %d, err %d\n",
6200 __func__, tag, err);
f20810d8
SRT
6201 if (!err)
6202 err = resp; /* service response error */
6203 goto out;
6204 }
6205 }
6206
6207 if (!poll_cnt) {
6208 err = -EBUSY;
7a3e97b0
SY
6209 goto out;
6210 }
7a3e97b0 6211
e2933132
SRT
6212 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6213 UFS_ABORT_TASK, &resp);
6214 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
ff8e20c6 6215 if (!err) {
f20810d8 6216 err = resp; /* service response error */
ff8e20c6
DR
6217 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6218 __func__, tag, err);
6219 }
7a3e97b0 6220 goto out;
e2933132 6221 }
7a3e97b0 6222
f20810d8 6223 err = ufshcd_clear_cmd(hba, tag);
ff8e20c6
DR
6224 if (err) {
6225 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6226 __func__, tag, err);
f20810d8 6227 goto out;
ff8e20c6 6228 }
f20810d8 6229
7a3e97b0
SY
6230 scsi_dma_unmap(cmd);
6231
6232 spin_lock_irqsave(host->host_lock, flags);
a48353f6 6233 ufshcd_outstanding_req_clear(hba, tag);
7a3e97b0
SY
6234 hba->lrb[tag].cmd = NULL;
6235 spin_unlock_irqrestore(host->host_lock, flags);
5a0b0cb9 6236
7a3e97b0 6237out:
f20810d8
SRT
6238 if (!err) {
6239 err = SUCCESS;
6240 } else {
6241 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
e0b299e3 6242 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
f20810d8
SRT
6243 err = FAILED;
6244 }
6245
1ab27c9c
ST
6246 /*
6247 * This ufshcd_release() corresponds to the original scsi cmd that got
6248 * aborted here (as we won't get any IRQ for it).
6249 */
6250 ufshcd_release(hba);
7a3e97b0
SY
6251 return err;
6252}
6253
3441da7d
SRT
6254/**
6255 * ufshcd_host_reset_and_restore - reset and restore host controller
6256 * @hba: per-adapter instance
6257 *
6258 * Note that host controller reset may issue DME_RESET to
6259 * the local and remote (device) UniPro stacks and the attributes
6260 * are reset to their default state.
6261 *
6262 * Returns zero on success, non-zero on failure
6263 */
6264static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6265{
6266 int err;
3441da7d
SRT
6267 unsigned long flags;
6268
2df74b69
CG
6269 /*
6270 * Stop the host controller and complete the requests
6271 * cleared by h/w
6272 */
3441da7d 6273 spin_lock_irqsave(hba->host->host_lock, flags);
596585a2 6274 ufshcd_hba_stop(hba, false);
2df74b69
CG
6275 hba->silence_err_logs = true;
6276 ufshcd_complete_requests(hba);
6277 hba->silence_err_logs = false;
3441da7d
SRT
6278 spin_unlock_irqrestore(hba->host->host_lock, flags);
6279
a3cd5ec5
SJ
6280 /* scale up clocks to max frequency before full reinitialization */
6281 ufshcd_scale_clks(hba, true);
6282
3441da7d
SRT
6283 err = ufshcd_hba_enable(hba);
6284 if (err)
6285 goto out;
6286
6287 /* Establish the link again and restore the device */
1d337ec2
SRT
6288 err = ufshcd_probe_hba(hba);
6289
6290 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3441da7d
SRT
6291 err = -EIO;
6292out:
6293 if (err)
6294 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
8808b4e9 6295 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
3441da7d
SRT
6296 return err;
6297}
6298
6299/**
6300 * ufshcd_reset_and_restore - reset and re-initialize host/device
6301 * @hba: per-adapter instance
6302 *
6303 * Reset and recover device, host and re-establish link. This
6304 * is helpful to recover the communication in fatal error conditions.
6305 *
6306 * Returns zero on success, non-zero on failure
6307 */
6308static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6309{
6310 int err = 0;
1d337ec2 6311 int retries = MAX_HOST_RESET_RETRIES;
3441da7d 6312
1d337ec2 6313 do {
d8d9f793
BA
6314 /* Reset the attached device */
6315 ufshcd_vops_device_reset(hba);
6316
1d337ec2
SRT
6317 err = ufshcd_host_reset_and_restore(hba);
6318 } while (err && --retries);
3441da7d 6319
3441da7d
SRT
6320 return err;
6321}
6322
6323/**
6324 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
8aa29f19 6325 * @cmd: SCSI command pointer
3441da7d
SRT
6326 *
6327 * Returns SUCCESS/FAILED
6328 */
6329static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6330{
6331 int err;
6332 unsigned long flags;
6333 struct ufs_hba *hba;
6334
6335 hba = shost_priv(cmd->device->host);
6336
1ab27c9c 6337 ufshcd_hold(hba, false);
3441da7d
SRT
6338 /*
6339 * Check if there is any race with fatal error handling.
6340 * If so, wait for it to complete. Even though fatal error
6341 * handling does reset and restore in some cases, don't assume
6342 * anything out of it. We are just avoiding race here.
6343 */
6344 do {
6345 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 6346 if (!(work_pending(&hba->eh_work) ||
8dc0da79
ZL
6347 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6348 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
3441da7d
SRT
6349 break;
6350 spin_unlock_irqrestore(hba->host->host_lock, flags);
6351 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
e8e7f271 6352 flush_work(&hba->eh_work);
3441da7d
SRT
6353 } while (1);
6354
6355 hba->ufshcd_state = UFSHCD_STATE_RESET;
6356 ufshcd_set_eh_in_progress(hba);
6357 spin_unlock_irqrestore(hba->host->host_lock, flags);
6358
6359 err = ufshcd_reset_and_restore(hba);
6360
6361 spin_lock_irqsave(hba->host->host_lock, flags);
6362 if (!err) {
6363 err = SUCCESS;
6364 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6365 } else {
6366 err = FAILED;
6367 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6368 }
6369 ufshcd_clear_eh_in_progress(hba);
6370 spin_unlock_irqrestore(hba->host->host_lock, flags);
6371
1ab27c9c 6372 ufshcd_release(hba);
3441da7d
SRT
6373 return err;
6374}
6375
3a4bf06d
YG
6376/**
6377 * ufshcd_get_max_icc_level - calculate the ICC level
6378 * @sup_curr_uA: max. current supported by the regulator
6379 * @start_scan: row at the desc table to start scan from
6380 * @buff: power descriptor buffer
6381 *
6382 * Returns calculated max ICC level for specific regulator
6383 */
6384static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6385{
6386 int i;
6387 int curr_uA;
6388 u16 data;
6389 u16 unit;
6390
6391 for (i = start_scan; i >= 0; i--) {
d79713f9 6392 data = be16_to_cpup((__be16 *)&buff[2 * i]);
3a4bf06d
YG
6393 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6394 ATTR_ICC_LVL_UNIT_OFFSET;
6395 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6396 switch (unit) {
6397 case UFSHCD_NANO_AMP:
6398 curr_uA = curr_uA / 1000;
6399 break;
6400 case UFSHCD_MILI_AMP:
6401 curr_uA = curr_uA * 1000;
6402 break;
6403 case UFSHCD_AMP:
6404 curr_uA = curr_uA * 1000 * 1000;
6405 break;
6406 case UFSHCD_MICRO_AMP:
6407 default:
6408 break;
6409 }
6410 if (sup_curr_uA >= curr_uA)
6411 break;
6412 }
6413 if (i < 0) {
6414 i = 0;
6415 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6416 }
6417
6418 return (u32)i;
6419}
6420
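/*
 * Editor's worked example (illustrative only): assume the regulator reports
 * sup_curr_uA = 150000 (150 mA). Scanning downward from start_scan, a
 * descriptor entry encoding 200 with unit UFSHCD_MILI_AMP normalizes to
 * 200000 uA, which exceeds 150000, so the scan continues; the next entry
 * encoding 100 mA normalizes to 100000 uA <= 150000, so that entry's index
 * is returned as the maximum supportable ICC level.
 */
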
6421/**
6422 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level.
6423 * In case regulators are not initialized we'll return 0.
6424 * @hba: per-adapter instance
6425 * @desc_buf: power descriptor buffer to extract ICC levels from.
6426 * @len: length of desc_buff
6427 *
6428 * Returns calculated ICC level
6429 */
6430static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6431 u8 *desc_buf, int len)
6432{
6433 u32 icc_level = 0;
6434
6435 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6436 !hba->vreg_info.vccq2) {
6437 dev_err(hba->dev,
6438 "%s: Regulator capability was not set, actvIccLevel=%d",
6439 __func__, icc_level);
6440 goto out;
6441 }
6442
0487fff7 6443 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
3a4bf06d
YG
6444 icc_level = ufshcd_get_max_icc_level(
6445 hba->vreg_info.vcc->max_uA,
6446 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6447 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6448
0487fff7 6449 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
3a4bf06d
YG
6450 icc_level = ufshcd_get_max_icc_level(
6451 hba->vreg_info.vccq->max_uA,
6452 icc_level,
6453 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6454
0487fff7 6455 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
3a4bf06d
YG
6456 icc_level = ufshcd_get_max_icc_level(
6457 hba->vreg_info.vccq2->max_uA,
6458 icc_level,
6459 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6460out:
6461 return icc_level;
6462}
6463
6464static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6465{
6466 int ret;
a4b0e8a4 6467 int buff_len = hba->desc_size.pwr_desc;
bbe21d7a
KC
6468 u8 *desc_buf;
6469
6470 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6471 if (!desc_buf)
6472 return;
3a4bf06d
YG
6473
6474 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6475 if (ret) {
6476 dev_err(hba->dev,
6477 "%s: Failed reading power descriptor.len = %d ret = %d",
6478 __func__, buff_len, ret);
bbe21d7a 6479 goto out;
3a4bf06d
YG
6480 }
6481
6482 hba->init_prefetch_data.icc_level =
6483 ufshcd_find_max_sup_active_icc_level(hba,
6484 desc_buf, buff_len);
6485 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6486 __func__, hba->init_prefetch_data.icc_level);
6487
dbd34a61
SM
6488 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6489 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6490 &hba->init_prefetch_data.icc_level);
3a4bf06d
YG
6491
6492 if (ret)
6493 dev_err(hba->dev,
6494 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6495 __func__, hba->init_prefetch_data.icc_level , ret);
6496
bbe21d7a
KC
6497out:
6498 kfree(desc_buf);
3a4bf06d
YG
6499}
6500
2a8fa600
SJ
6501/**
6502 * ufshcd_scsi_add_wlus - Adds required W-LUs
6503 * @hba: per-adapter instance
6504 *
6505 * UFS device specification requires the UFS devices to support 4 well known
6506 * logical units:
6507 * "REPORT_LUNS" (address: 01h)
6508 * "UFS Device" (address: 50h)
6509 * "RPMB" (address: 44h)
6510 * "BOOT" (address: 30h)
6511 * UFS device's power management needs to be controlled by "POWER CONDITION"
6512 * field of SSU (START STOP UNIT) command. But this "power condition" field
6513 * will take effect only when it's sent to the "UFS device" well known logical
6514 * unit, hence we require the scsi_device instance to represent this LU in
6515 * order for the UFS host driver to send the SSU command for power management.
8aa29f19 6516 *
2a8fa600
SJ
6517 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6518 * Block) LU so that a user space process can control this LU. User space may
6519 * also want to have access to the BOOT LU.
8aa29f19 6520 *
2a8fa600
SJ
6521 * This function adds scsi device instances for each of the well known LUs
6522 * (except "REPORT LUNS" LU).
6523 *
6524 * Returns zero on success (all required W-LUs are added successfully),
6525 * non-zero error value on failure (if failed to add any of the required W-LU).
6526 */
6527static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6528{
6529 int ret = 0;
7c48bfd0
AM
6530 struct scsi_device *sdev_rpmb;
6531 struct scsi_device *sdev_boot;
2a8fa600
SJ
6532
6533 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6534 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6535 if (IS_ERR(hba->sdev_ufs_device)) {
6536 ret = PTR_ERR(hba->sdev_ufs_device);
6537 hba->sdev_ufs_device = NULL;
6538 goto out;
6539 }
7c48bfd0 6540 scsi_device_put(hba->sdev_ufs_device);
2a8fa600 6541
7c48bfd0 6542 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
2a8fa600 6543 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7c48bfd0
AM
6544 if (IS_ERR(sdev_rpmb)) {
6545 ret = PTR_ERR(sdev_rpmb);
3d21fbde 6546 goto remove_sdev_ufs_device;
2a8fa600 6547 }
7c48bfd0 6548 scsi_device_put(sdev_rpmb);
3d21fbde
HK
6549
6550 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6551 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6552 if (IS_ERR(sdev_boot))
6553 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6554 else
6555 scsi_device_put(sdev_boot);
2a8fa600
SJ
6556 goto out;
6557
2a8fa600
SJ
6558remove_sdev_ufs_device:
6559 scsi_remove_device(hba->sdev_ufs_device);
6560out:
6561 return ret;
6562}
6563
93fdd5ac
TW
6564static int ufs_get_device_desc(struct ufs_hba *hba,
6565 struct ufs_dev_desc *dev_desc)
c58ab7aa
YG
6566{
6567 int err;
bbe21d7a 6568 size_t buff_len;
c58ab7aa 6569 u8 model_index;
bbe21d7a
KC
6570 u8 *desc_buf;
6571
4b828fe1
TW
6572 if (!dev_desc)
6573 return -EINVAL;
6574
bbe21d7a
KC
6575 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6576 QUERY_DESC_MAX_SIZE + 1);
6577 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6578 if (!desc_buf) {
6579 err = -ENOMEM;
6580 goto out;
6581 }
c58ab7aa 6582
a4b0e8a4 6583 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
c58ab7aa
YG
6584 if (err) {
6585 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6586 __func__, err);
6587 goto out;
6588 }
6589
6590 /*
6591 * getting vendor (manufacturerID) and Bank Index in big endian
6592 * format
6593 */
93fdd5ac 6594 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
c58ab7aa
YG
6595 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6596
6597 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
4b828fe1
TW
6598 err = ufshcd_read_string_desc(hba, model_index,
6599 &dev_desc->model, SD_ASCII_STD);
6600 if (err < 0) {
c58ab7aa
YG
6601 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6602 __func__, err);
6603 goto out;
6604 }
6605
4b828fe1
TW
6606 /*
6607 * ufshcd_read_string_desc returns the size of the string;
6608 * reset the error value
6609 */
6610 err = 0;
c58ab7aa
YG
6611
6612out:
bbe21d7a 6613 kfree(desc_buf);
c58ab7aa
YG
6614 return err;
6615}
6616
4b828fe1
TW
6617static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
6618{
6619 kfree(dev_desc->model);
6620 dev_desc->model = NULL;
6621}
6622
93fdd5ac
TW
6623static void ufs_fixup_device_setup(struct ufs_hba *hba,
6624 struct ufs_dev_desc *dev_desc)
c58ab7aa 6625{
c58ab7aa 6626 struct ufs_dev_fix *f;
c58ab7aa
YG
6627
6628 for (f = ufs_fixups; f->quirk; f++) {
93fdd5ac
TW
6629 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6630 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
4b828fe1
TW
6631 ((dev_desc->model &&
6632 STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
6633 !strcmp(f->card.model, UFS_ANY_MODEL)))
c58ab7aa
YG
6634 hba->dev_quirks |= f->quirk;
6635 }
6636}
6637
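/*
 * Editor's illustrative sketch (not part of the driver): an ufs_fixups entry
 * matching any model from a hypothetical manufacturer id 0x123 could look
 * roughly like this; ufs_fixup_device_setup() then ORs the quirk into
 * hba->dev_quirks for matching devices. Field names follow the accesses
 * above; the real table is built with the UFS_FIX() helpers.
 */
static const struct ufs_dev_fix example_fixup = {
	.card.wmanufacturerid = 0x123,		/* hypothetical vendor id */
	.card.model = UFS_ANY_MODEL,
	.quirk = UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS,
};
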
37113106
YG
6638/**
6639 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6640 * @hba: per-adapter instance
6641 *
6642 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6643 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6644 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6645 * the hibern8 exit latency.
6646 *
6647 * Returns zero on success, non-zero error value on failure.
6648 */
6649static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6650{
6651 int ret = 0;
6652 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6653
6654 ret = ufshcd_dme_peer_get(hba,
6655 UIC_ARG_MIB_SEL(
6656 RX_MIN_ACTIVATETIME_CAPABILITY,
6657 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6658 &peer_rx_min_activatetime);
6659 if (ret)
6660 goto out;
6661
6662 /* make sure proper unit conversion is applied */
6663 tuned_pa_tactivate =
6664 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6665 / PA_TACTIVATE_TIME_UNIT_US);
6666 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6667 tuned_pa_tactivate);
6668
6669out:
6670 return ret;
6671}
6672
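/*
 * Editor's worked example (illustrative only, assuming the usual unipro.h
 * unit definitions of 100 us per RX_MIN_ACTIVATETIME step and 10 us per
 * PA_TACTIVATE step): if the peer reports RX_MIN_ACTIVATETIME_CAPABILITY = 1
 * (100 us), then tuned_pa_tactivate = (1 * 100) / 10 = 10, i.e. PA_TACTIVATE
 * is programmed to 10 units of 10 us = 100 us, matching the peer's minimum.
 */
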
6673/**
6674 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6675 * @hba: per-adapter instance
6676 *
6677 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6678 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6679 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6680 * This optimal value can help reduce the hibern8 exit latency.
6681 *
6682 * Returns zero on success, non-zero error value on failure.
6683 */
6684static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6685{
6686 int ret = 0;
6687 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6688 u32 max_hibern8_time, tuned_pa_hibern8time;
6689
6690 ret = ufshcd_dme_get(hba,
6691 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6692 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6693 &local_tx_hibern8_time_cap);
6694 if (ret)
6695 goto out;
6696
6697 ret = ufshcd_dme_peer_get(hba,
6698 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6699 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6700 &peer_rx_hibern8_time_cap);
6701 if (ret)
6702 goto out;
6703
6704 max_hibern8_time = max(local_tx_hibern8_time_cap,
6705 peer_rx_hibern8_time_cap);
6706 /* make sure proper unit conversion is applied */
6707 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6708 / PA_HIBERN8_TIME_UNIT_US);
6709 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6710 tuned_pa_hibern8time);
6711out:
6712 return ret;
6713}
6714
c6a6db43
SJ
6715/**
6716 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6717 * less than device PA_TACTIVATE time.
6718 * @hba: per-adapter instance
6719 *
6720 * Some UFS devices require host PA_TACTIVATE to be lower than device
6721 * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
6722 * for such devices.
6723 *
6724 * Returns zero on success, non-zero error value on failure.
6725 */
6726static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6727{
6728 int ret = 0;
6729 u32 granularity, peer_granularity;
6730 u32 pa_tactivate, peer_pa_tactivate;
6731 u32 pa_tactivate_us, peer_pa_tactivate_us;
6732 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6733
6734 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6735 &granularity);
6736 if (ret)
6737 goto out;
6738
6739 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6740 &peer_granularity);
6741 if (ret)
6742 goto out;
6743
6744 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6745 (granularity > PA_GRANULARITY_MAX_VAL)) {
6746 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6747 __func__, granularity);
6748 return -EINVAL;
6749 }
6750
6751 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6752 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6753 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6754 __func__, peer_granularity);
6755 return -EINVAL;
6756 }
6757
6758 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6759 if (ret)
6760 goto out;
6761
6762 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6763 &peer_pa_tactivate);
6764 if (ret)
6765 goto out;
6766
6767 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6768 peer_pa_tactivate_us = peer_pa_tactivate *
6769 gran_to_us_table[peer_granularity - 1];
6770
6771 if (pa_tactivate_us > peer_pa_tactivate_us) {
6772 u32 new_peer_pa_tactivate;
6773
6774 new_peer_pa_tactivate = pa_tactivate_us /
6775 gran_to_us_table[peer_granularity - 1];
6776 new_peer_pa_tactivate++;
6777 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6778 new_peer_pa_tactivate);
6779 }
6780
6781out:
6782 return ret;
6783}
6784
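/*
 * Editor's worked example (illustrative only), using gran_to_us_table[]
 * above: host PA_GRANULARITY = 3 (8 us/step) with PA_TACTIVATE = 2 gives
 * 16 us; peer PA_GRANULARITY = 1 (1 us/step) with PA_TACTIVATE = 10 gives
 * 10 us. Since 16 > 10, the peer value is rewritten to 16 / 1 + 1 = 17,
 * which makes the device's PA_TACTIVATE strictly larger than the host's.
 */
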
37113106
YG
6785static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6786{
6787 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6788 ufshcd_tune_pa_tactivate(hba);
6789 ufshcd_tune_pa_hibern8time(hba);
6790 }
6791
6792 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6793 /* set 1ms timeout for PA_TACTIVATE */
6794 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
c6a6db43
SJ
6795
6796 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6797 ufshcd_quirk_tune_host_pa_tactivate(hba);
56d4a186
SJ
6798
6799 ufshcd_vops_apply_dev_quirks(hba);
37113106
YG
6800}
6801
ff8e20c6
DR
6802static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6803{
ff8e20c6
DR
6804 hba->ufs_stats.hibern8_exit_cnt = 0;
6805 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
7fabb77b 6806 hba->req_abort_count = 0;
ff8e20c6
DR
6807}
6808
a4b0e8a4
PM
6809static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6810{
6811 int err;
6812
6813 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6814 &hba->desc_size.dev_desc);
6815 if (err)
6816 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6817
6818 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6819 &hba->desc_size.pwr_desc);
6820 if (err)
6821 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6822
6823 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6824 &hba->desc_size.interc_desc);
6825 if (err)
6826 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6827
6828 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6829 &hba->desc_size.conf_desc);
6830 if (err)
6831 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6832
6833 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6834 &hba->desc_size.unit_desc);
6835 if (err)
6836 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6837
6838 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6839 &hba->desc_size.geom_desc);
6840 if (err)
6841 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
059efd84 6842
c648c2d2
SN
6843 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6844 &hba->desc_size.hlth_desc);
6845 if (err)
6846 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
a4b0e8a4
PM
6847}
6848
9e1e8a75
SJ
6849static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
6850 {19200000, REF_CLK_FREQ_19_2_MHZ},
6851 {26000000, REF_CLK_FREQ_26_MHZ},
6852 {38400000, REF_CLK_FREQ_38_4_MHZ},
6853 {52000000, REF_CLK_FREQ_52_MHZ},
6854 {0, REF_CLK_FREQ_INVAL},
6855};
6856
6857static enum ufs_ref_clk_freq
6858ufs_get_bref_clk_from_hz(unsigned long freq)
6859{
6860 int i;
6861
6862 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
6863 if (ufs_ref_clk_freqs[i].freq_hz == freq)
6864 return ufs_ref_clk_freqs[i].val;
6865
6866 return REF_CLK_FREQ_INVAL;
6867}
6868
6869void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
6870{
6871 unsigned long freq;
6872
6873 freq = clk_get_rate(refclk);
6874
6875 hba->dev_ref_clk_freq =
6876 ufs_get_bref_clk_from_hz(freq);
6877
6878 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
6879 dev_err(hba->dev,
6880 "invalid ref_clk setting = %ld\n", freq);
6881}
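/*
 * Added illustrative note: with the ufs_ref_clk_freqs[] table above, a
 * device-tree "ref_clk" clock running at 26000000 Hz is translated to
 * REF_CLK_FREQ_26_MHZ; any rate that is not one of 19.2/26/38.4/52 MHz falls
 * through to REF_CLK_FREQ_INVAL and is reported as an invalid ref_clk
 * setting.
 */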
6882
6883static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6884{
6885 int err;
6886 u32 ref_clk;
6887 u32 freq = hba->dev_ref_clk_freq;
6888
6889 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6890 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6891
6892 if (err) {
6893 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
6894 err);
6895 goto out;
6896 }
6897
6898 if (ref_clk == freq)
6899 goto out; /* nothing to update */
6900
6901 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6902 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
6903
6904 if (err) {
6905 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
6906 ufs_ref_clk_freqs[freq].freq_hz);
6907 goto out;
6908 }
6909
6910 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
6911 ufs_ref_clk_freqs[freq].freq_hz);
6912
6913out:
6914 return err;
6915}
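/*
 * Added illustrative note (attribute encoding assumed to follow the table
 * above): if the board supplies the device with a 26 MHz reference clock but
 * the device's bRefClkFreq attribute still holds the value for 19.2 MHz, the
 * helper above reads the attribute, sees the mismatch and writes back
 * hba->dev_ref_clk_freq (here REF_CLK_FREQ_26_MHZ) so that the device's
 * notion of the reference clock matches the hardware.
 */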
6916
6ccf44fe 6917/**
1d337ec2
SRT
6918 * ufshcd_probe_hba - probe hba to detect device and initialize
6919 * @hba: per-adapter instance
6920 *
6921 * Execute link-startup and verify device initialization
6ccf44fe 6922 */
1d337ec2 6923static int ufshcd_probe_hba(struct ufs_hba *hba)
6ccf44fe 6924{
93fdd5ac 6925 struct ufs_dev_desc card = {0};
6ccf44fe 6926 int ret;
7ff5ab47 6927 ktime_t start = ktime_get();
6ccf44fe
SJ
6928
6929 ret = ufshcd_link_startup(hba);
5a0b0cb9
SRT
6930 if (ret)
6931 goto out;
6932
afdfff59
YG
6933 /* set the default level for urgent bkops */
6934 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6935 hba->is_urgent_bkops_lvl_checked = false;
6936
ff8e20c6
DR
6937 /* Debug counters initialization */
6938 ufshcd_clear_dbg_ufs_stats(hba);
6939
57d104c1
SJ
6940 /* UniPro link is active now */
6941 ufshcd_set_link_active(hba);
d3e89bac 6942
5a0b0cb9
SRT
6943 ret = ufshcd_verify_dev_init(hba);
6944 if (ret)
6945 goto out;
68078d5c
DR
6946
6947 ret = ufshcd_complete_dev_init(hba);
6948 if (ret)
6949 goto out;
5a0b0cb9 6950
a4b0e8a4
PM
6951 /* Init check for device descriptor sizes */
6952 ufshcd_init_desc_sizes(hba);
6953
93fdd5ac
TW
6954 ret = ufs_get_device_desc(hba, &card);
6955 if (ret) {
6956 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6957 __func__, ret);
6958 goto out;
6959 }
6960
6961 ufs_fixup_device_setup(hba, &card);
4b828fe1
TW
6962 ufs_put_device_desc(&card);
6963
37113106 6964 ufshcd_tune_unipro_params(hba);
60f01870 6965
57d104c1
SJ
6966 /* UFS device is also active now */
6967 ufshcd_set_ufs_dev_active(hba);
66ec6d59 6968 ufshcd_force_reset_auto_bkops(hba);
57d104c1
SJ
6969 hba->wlun_dev_clr_ua = true;
6970
7eb584db
DR
6971 if (ufshcd_get_max_pwr_mode(hba)) {
6972 dev_err(hba->dev,
6973 "%s: Failed getting max supported power mode\n",
6974 __func__);
6975 } else {
9e1e8a75
SJ
6976 /*
6977 * Set the right value to bRefClkFreq before attempting to
6978 * switch to HS gears.
6979 */
6980 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
6981 ufshcd_set_dev_ref_clk(hba);
7eb584db 6982 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8643ae66 6983 if (ret) {
7eb584db
DR
6984 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6985 __func__, ret);
8643ae66
DL
6986 goto out;
6987 }
7eb584db 6988 }
57d104c1 6989
53c12d0e
YG
6990 /* set the state as operational after switching to desired gear */
6991 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
a4b0e8a4 6992
71d848b8
CG
6993 /* Enable Auto-Hibernate if configured */
6994 ufshcd_auto_hibern8_enable(hba);
6995
57d104c1
SJ
6996 /*
6997 * If we are in error handling context or in power management callbacks
6998 * context, no need to scan the host
6999 */
7000 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7001 bool flag;
7002
7003 /* clear any previous UFS device information */
7004 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
dc3c8d3a
YG
7005 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7006 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
57d104c1 7007 hba->dev_info.f_power_on_wp_en = flag;
3441da7d 7008
3a4bf06d
YG
7009 if (!hba->is_init_prefetch)
7010 ufshcd_init_icc_levels(hba);
7011
2a8fa600
SJ
7012 /* Add required well known logical units to scsi mid layer */
7013 if (ufshcd_scsi_add_wlus(hba))
7014 goto out;
7015
0701e49d
SJ
7016 /* Initialize devfreq after UFS device is detected */
7017 if (ufshcd_is_clkscaling_supported(hba)) {
7018 memcpy(&hba->clk_scaling.saved_pwr_info.info,
7019 &hba->pwr_info,
7020 sizeof(struct ufs_pa_layer_attr));
7021 hba->clk_scaling.saved_pwr_info.is_valid = true;
7022 if (!hba->devfreq) {
deac444f
BA
7023 ret = ufshcd_devfreq_init(hba);
7024 if (ret)
0701e49d 7025 goto out;
0701e49d
SJ
7026 }
7027 hba->clk_scaling.is_allowed = true;
7028 }
7029
df032bf2
AA
7030 ufs_bsg_probe(hba);
7031
3441da7d
SRT
7032 scsi_scan_host(hba->host);
7033 pm_runtime_put_sync(hba->dev);
7034 }
3a4bf06d
YG
7035
7036 if (!hba->is_init_prefetch)
7037 hba->is_init_prefetch = true;
7038
5a0b0cb9 7039out:
1d337ec2
SRT
7040 /*
7041 * If we failed to initialize the device or the device is not
7042 * present, turn off the power/clocks etc.
7043 */
57d104c1
SJ
7044 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
7045 pm_runtime_put_sync(hba->dev);
eebcc196 7046 ufshcd_exit_clk_scaling(hba);
1d337ec2 7047 ufshcd_hba_exit(hba);
57d104c1 7048 }
1d337ec2 7049
7ff5ab47
SJ
7050 trace_ufshcd_init(dev_name(hba->dev), ret,
7051 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 7052 hba->curr_dev_pwr_mode, hba->uic_link_state);
1d337ec2
SRT
7053 return ret;
7054}
7055
7056/**
7057 * ufshcd_async_scan - asynchronous execution for probing hba
7058 * @data: data pointer to pass to this function
7059 * @cookie: cookie data
7060 */
7061static void ufshcd_async_scan(void *data, async_cookie_t cookie)
7062{
7063 struct ufs_hba *hba = (struct ufs_hba *)data;
7064
7065 ufshcd_probe_hba(hba);
6ccf44fe
SJ
7066}
7067
f550c65b
YG
7068static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
7069{
7070 unsigned long flags;
7071 struct Scsi_Host *host;
7072 struct ufs_hba *hba;
7073 int index;
7074 bool found = false;
7075
7076 if (!scmd || !scmd->device || !scmd->device->host)
6600593c 7077 return BLK_EH_DONE;
f550c65b
YG
7078
7079 host = scmd->device->host;
7080 hba = shost_priv(host);
7081 if (!hba)
6600593c 7082 return BLK_EH_DONE;
f550c65b
YG
7083
7084 spin_lock_irqsave(host->host_lock, flags);
7085
7086 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
7087 if (hba->lrb[index].cmd == scmd) {
7088 found = true;
7089 break;
7090 }
7091 }
7092
7093 spin_unlock_irqrestore(host->host_lock, flags);
7094
7095 /*
7096 * Bypass SCSI error handling and reset the block layer timer if this
7097 * SCSI command was not actually dispatched to the UFS driver; otherwise,
7098 * let the SCSI layer handle the error as usual.
7099 */
6600593c 7100 return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
f550c65b
YG
7101}
7102
d829fc8a
SN
7103static const struct attribute_group *ufshcd_driver_groups[] = {
7104 &ufs_sysfs_unit_descriptor_group,
ec92b59c 7105 &ufs_sysfs_lun_attributes_group,
d829fc8a
SN
7106 NULL,
7107};
7108
7a3e97b0
SY
7109static struct scsi_host_template ufshcd_driver_template = {
7110 .module = THIS_MODULE,
7111 .name = UFSHCD,
7112 .proc_name = UFSHCD,
7113 .queuecommand = ufshcd_queuecommand,
7114 .slave_alloc = ufshcd_slave_alloc,
eeda4749 7115 .slave_configure = ufshcd_slave_configure,
7a3e97b0 7116 .slave_destroy = ufshcd_slave_destroy,
4264fd61 7117 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 7118 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
7119 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7120 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
f550c65b 7121 .eh_timed_out = ufshcd_eh_timed_out,
7a3e97b0
SY
7122 .this_id = -1,
7123 .sg_tablesize = SG_ALL,
7124 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7125 .can_queue = UFSHCD_CAN_QUEUE,
552a990c 7126 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
1ab27c9c 7127 .max_host_blocked = 1,
c40ecc12 7128 .track_queue_depth = 1,
d829fc8a 7129 .sdev_groups = ufshcd_driver_groups,
4af14d11 7130 .dma_boundary = PAGE_SIZE - 1,
49615ba1 7131 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
7a3e97b0
SY
7132};
7133
57d104c1
SJ
7134static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7135 int ua)
7136{
7b16a07c 7137 int ret;
57d104c1 7138
7b16a07c
BA
7139 if (!vreg)
7140 return 0;
57d104c1 7141
0487fff7
SC
7142 /*
7143 * "set_load" operation shall be required on those regulators
7144 * which specifically configured current limitation. Otherwise
7145 * zero max_uA may cause unexpected behavior when regulator is
7146 * enabled or set as high power mode.
7147 */
7148 if (!vreg->max_uA)
7149 return 0;
7150
7b16a07c
BA
7151 ret = regulator_set_load(vreg->reg, ua);
7152 if (ret < 0) {
7153 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7154 __func__, vreg->name, ua, ret);
57d104c1
SJ
7155 }
7156
7157 return ret;
7158}
7159
7160static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7161 struct ufs_vreg *vreg)
7162{
73067981 7163 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
57d104c1
SJ
7164}
7165
7166static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7167 struct ufs_vreg *vreg)
7168{
7c7cfdcf
AH
7169 if (!vreg)
7170 return 0;
7171
73067981 7172 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
57d104c1
SJ
7173}
7174
aa497613
SRT
7175static int ufshcd_config_vreg(struct device *dev,
7176 struct ufs_vreg *vreg, bool on)
7177{
7178 int ret = 0;
72753590
GS
7179 struct regulator *reg;
7180 const char *name;
aa497613
SRT
7181 int min_uV, uA_load;
7182
7183 BUG_ON(!vreg);
7184
72753590
GS
7185 reg = vreg->reg;
7186 name = vreg->name;
7187
aa497613 7188 if (regulator_count_voltages(reg) > 0) {
3b141e8c
SC
7189 if (vreg->min_uV && vreg->max_uV) {
7190 min_uV = on ? vreg->min_uV : 0;
7191 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7192 if (ret) {
7193 dev_err(dev,
7194 "%s: %s set voltage failed, err=%d\n",
aa497613 7195 __func__, name, ret);
3b141e8c
SC
7196 goto out;
7197 }
aa497613
SRT
7198 }
7199
7200 uA_load = on ? vreg->max_uA : 0;
57d104c1
SJ
7201 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7202 if (ret)
aa497613 7203 goto out;
aa497613
SRT
7204 }
7205out:
7206 return ret;
7207}
7208
7209static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7210{
7211 int ret = 0;
7212
73067981 7213 if (!vreg || vreg->enabled)
aa497613
SRT
7214 goto out;
7215
7216 ret = ufshcd_config_vreg(dev, vreg, true);
7217 if (!ret)
7218 ret = regulator_enable(vreg->reg);
7219
7220 if (!ret)
7221 vreg->enabled = true;
7222 else
7223 dev_err(dev, "%s: %s enable failed, err=%d\n",
7224 __func__, vreg->name, ret);
7225out:
7226 return ret;
7227}
7228
7229static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7230{
7231 int ret = 0;
7232
73067981 7233 if (!vreg || !vreg->enabled)
aa497613
SRT
7234 goto out;
7235
7236 ret = regulator_disable(vreg->reg);
7237
7238 if (!ret) {
7239 /* ignore errors on applying disable config */
7240 ufshcd_config_vreg(dev, vreg, false);
7241 vreg->enabled = false;
7242 } else {
7243 dev_err(dev, "%s: %s disable failed, err=%d\n",
7244 __func__, vreg->name, ret);
7245 }
7246out:
7247 return ret;
7248}
7249
7250static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7251{
7252 int ret = 0;
7253 struct device *dev = hba->dev;
7254 struct ufs_vreg_info *info = &hba->vreg_info;
7255
aa497613
SRT
7256 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7257 if (ret)
7258 goto out;
7259
7260 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7261 if (ret)
7262 goto out;
7263
7264 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7265 if (ret)
7266 goto out;
7267
7268out:
7269 if (ret) {
7270 ufshcd_toggle_vreg(dev, info->vccq2, false);
7271 ufshcd_toggle_vreg(dev, info->vccq, false);
7272 ufshcd_toggle_vreg(dev, info->vcc, false);
7273 }
7274 return ret;
7275}
7276
6a771a65
RS
7277static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7278{
7279 struct ufs_vreg_info *info = &hba->vreg_info;
7280
60b7b823 7281 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6a771a65
RS
7282}
7283
aa497613
SRT
7284static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7285{
7286 int ret = 0;
7287
7288 if (!vreg)
7289 goto out;
7290
7291 vreg->reg = devm_regulator_get(dev, vreg->name);
7292 if (IS_ERR(vreg->reg)) {
7293 ret = PTR_ERR(vreg->reg);
7294 dev_err(dev, "%s: %s get failed, err=%d\n",
7295 __func__, vreg->name, ret);
7296 }
7297out:
7298 return ret;
7299}
7300
7301static int ufshcd_init_vreg(struct ufs_hba *hba)
7302{
7303 int ret = 0;
7304 struct device *dev = hba->dev;
7305 struct ufs_vreg_info *info = &hba->vreg_info;
7306
aa497613
SRT
7307 ret = ufshcd_get_vreg(dev, info->vcc);
7308 if (ret)
7309 goto out;
7310
7311 ret = ufshcd_get_vreg(dev, info->vccq);
7312 if (ret)
7313 goto out;
7314
7315 ret = ufshcd_get_vreg(dev, info->vccq2);
7316out:
7317 return ret;
7318}
7319
6a771a65
RS
7320static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7321{
7322 struct ufs_vreg_info *info = &hba->vreg_info;
7323
7324 if (info)
7325 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7326
7327 return 0;
7328}
7329
57d104c1
SJ
7330static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7331 bool skip_ref_clk)
c6e79dac
SRT
7332{
7333 int ret = 0;
7334 struct ufs_clk_info *clki;
7335 struct list_head *head = &hba->clk_list_head;
1ab27c9c 7336 unsigned long flags;
911a0771
SJ
7337 ktime_t start = ktime_get();
7338 bool clk_state_changed = false;
c6e79dac 7339
566ec9ad 7340 if (list_empty(head))
c6e79dac
SRT
7341 goto out;
7342
b334456e
SJ
7343 /*
7344 * Vendor specific setup_clocks ops may depend on the clocks managed by
7345 * this standard driver, hence call the vendor specific setup_clocks
7346 * callback before disabling the clocks managed here.
7347 */
7348 if (!on) {
7349 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7350 if (ret)
7351 return ret;
7352 }
1e879e8f 7353
c6e79dac
SRT
7354 list_for_each_entry(clki, head, list) {
7355 if (!IS_ERR_OR_NULL(clki->clk)) {
57d104c1
SJ
7356 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7357 continue;
7358
911a0771 7359 clk_state_changed = on ^ clki->enabled;
c6e79dac
SRT
7360 if (on && !clki->enabled) {
7361 ret = clk_prepare_enable(clki->clk);
7362 if (ret) {
7363 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7364 __func__, clki->name, ret);
7365 goto out;
7366 }
7367 } else if (!on && clki->enabled) {
7368 clk_disable_unprepare(clki->clk);
7369 }
7370 clki->enabled = on;
7371 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7372 clki->name, on ? "en" : "dis");
7373 }
7374 }
1ab27c9c 7375
b334456e
SJ
7376 /*
7377 * Vendor specific setup_clocks ops may depend on the clocks managed by
7378 * this standard driver, hence call the vendor specific setup_clocks
7379 * callback after enabling the clocks managed here.
7380 */
7381 if (on) {
7382 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7383 if (ret)
7384 return ret;
7385 }
1e879e8f 7386
c6e79dac
SRT
7387out:
7388 if (ret) {
7389 list_for_each_entry(clki, head, list) {
7390 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7391 clk_disable_unprepare(clki->clk);
7392 }
7ff5ab47 7393 } else if (!ret && on) {
1ab27c9c
ST
7394 spin_lock_irqsave(hba->host->host_lock, flags);
7395 hba->clk_gating.state = CLKS_ON;
7ff5ab47
SJ
7396 trace_ufshcd_clk_gating(dev_name(hba->dev),
7397 hba->clk_gating.state);
1ab27c9c 7398 spin_unlock_irqrestore(hba->host->host_lock, flags);
c6e79dac 7399 }
7ff5ab47 7400
911a0771
SJ
7401 if (clk_state_changed)
7402 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7403 (on ? "on" : "off"),
7404 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
c6e79dac
SRT
7405 return ret;
7406}
7407
57d104c1
SJ
7408static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7409{
7410 return __ufshcd_setup_clocks(hba, on, false);
7411}
7412
c6e79dac
SRT
7413static int ufshcd_init_clocks(struct ufs_hba *hba)
7414{
7415 int ret = 0;
7416 struct ufs_clk_info *clki;
7417 struct device *dev = hba->dev;
7418 struct list_head *head = &hba->clk_list_head;
7419
566ec9ad 7420 if (list_empty(head))
c6e79dac
SRT
7421 goto out;
7422
7423 list_for_each_entry(clki, head, list) {
7424 if (!clki->name)
7425 continue;
7426
7427 clki->clk = devm_clk_get(dev, clki->name);
7428 if (IS_ERR(clki->clk)) {
7429 ret = PTR_ERR(clki->clk);
7430 dev_err(dev, "%s: %s clk get failed, %d\n",
7431 __func__, clki->name, ret);
7432 goto out;
7433 }
7434
9e1e8a75
SJ
7435 /*
7436 * Parse device ref clk freq as per device tree "ref_clk".
7437 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7438 * in ufshcd_alloc_host().
7439 */
7440 if (!strcmp(clki->name, "ref_clk"))
7441 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7442
c6e79dac
SRT
7443 if (clki->max_freq) {
7444 ret = clk_set_rate(clki->clk, clki->max_freq);
7445 if (ret) {
7446 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7447 __func__, clki->name,
7448 clki->max_freq, ret);
7449 goto out;
7450 }
856b3483 7451 clki->curr_freq = clki->max_freq;
c6e79dac
SRT
7452 }
7453 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7454 clki->name, clk_get_rate(clki->clk));
7455 }
7456out:
7457 return ret;
7458}
7459
5c0c28a8
SRT
7460static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7461{
7462 int err = 0;
7463
7464 if (!hba->vops)
7465 goto out;
7466
0263bcd0
YG
7467 err = ufshcd_vops_init(hba);
7468 if (err)
7469 goto out;
5c0c28a8 7470
0263bcd0
YG
7471 err = ufshcd_vops_setup_regulators(hba, true);
7472 if (err)
7473 goto out_exit;
5c0c28a8
SRT
7474
7475 goto out;
7476
5c0c28a8 7477out_exit:
0263bcd0 7478 ufshcd_vops_exit(hba);
5c0c28a8
SRT
7479out:
7480 if (err)
7481 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
0263bcd0 7482 __func__, ufshcd_get_var_name(hba), err);
5c0c28a8
SRT
7483 return err;
7484}
7485
7486static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7487{
7488 if (!hba->vops)
7489 return;
7490
0263bcd0 7491 ufshcd_vops_setup_regulators(hba, false);
5c0c28a8 7492
0263bcd0 7493 ufshcd_vops_exit(hba);
5c0c28a8
SRT
7494}
7495
aa497613
SRT
7496static int ufshcd_hba_init(struct ufs_hba *hba)
7497{
7498 int err;
7499
6a771a65
RS
7500 /*
7501 * Handle host controller power separately from the UFS device power
7502 * rails, as this makes it easier to control UFS host controller power
7503 * collapse, which is different from UFS device power collapse.
7504 * Also, enable the host controller power before we go ahead with the
7505 * rest of the initialization here.
7506 */
7507 err = ufshcd_init_hba_vreg(hba);
aa497613
SRT
7508 if (err)
7509 goto out;
7510
6a771a65 7511 err = ufshcd_setup_hba_vreg(hba, true);
aa497613
SRT
7512 if (err)
7513 goto out;
7514
6a771a65
RS
7515 err = ufshcd_init_clocks(hba);
7516 if (err)
7517 goto out_disable_hba_vreg;
7518
7519 err = ufshcd_setup_clocks(hba, true);
7520 if (err)
7521 goto out_disable_hba_vreg;
7522
c6e79dac
SRT
7523 err = ufshcd_init_vreg(hba);
7524 if (err)
7525 goto out_disable_clks;
7526
7527 err = ufshcd_setup_vreg(hba, true);
7528 if (err)
7529 goto out_disable_clks;
7530
aa497613
SRT
7531 err = ufshcd_variant_hba_init(hba);
7532 if (err)
7533 goto out_disable_vreg;
7534
1d337ec2 7535 hba->is_powered = true;
aa497613
SRT
7536 goto out;
7537
7538out_disable_vreg:
7539 ufshcd_setup_vreg(hba, false);
c6e79dac
SRT
7540out_disable_clks:
7541 ufshcd_setup_clocks(hba, false);
6a771a65
RS
7542out_disable_hba_vreg:
7543 ufshcd_setup_hba_vreg(hba, false);
aa497613
SRT
7544out:
7545 return err;
7546}
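/*
 * Added summary note: the power-up order implemented above is host
 * controller rail -> clocks -> UFS device rails -> vendor variant init,
 * and the out_disable_* error labels unwind those steps in exactly the
 * reverse order, so a failure at any stage leaves nothing enabled.
 */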
7547
7548static void ufshcd_hba_exit(struct ufs_hba *hba)
7549{
1d337ec2
SRT
7550 if (hba->is_powered) {
7551 ufshcd_variant_hba_exit(hba);
7552 ufshcd_setup_vreg(hba, false);
a508253d 7553 ufshcd_suspend_clkscaling(hba);
eebcc196 7554 if (ufshcd_is_clkscaling_supported(hba))
0701e49d
SJ
7555 if (hba->devfreq)
7556 ufshcd_suspend_clkscaling(hba);
1d337ec2
SRT
7557 ufshcd_setup_clocks(hba, false);
7558 ufshcd_setup_hba_vreg(hba, false);
7559 hba->is_powered = false;
7560 }
aa497613
SRT
7561}
7562
57d104c1
SJ
7563static int
7564ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7565{
7566 unsigned char cmd[6] = {REQUEST_SENSE,
7567 0,
7568 0,
7569 0,
09a5a24f 7570 UFS_SENSE_SIZE,
57d104c1
SJ
7571 0};
7572 char *buffer;
7573 int ret;
7574
09a5a24f 7575 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
57d104c1
SJ
7576 if (!buffer) {
7577 ret = -ENOMEM;
7578 goto out;
7579 }
7580
fcbfffe2 7581 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
09a5a24f 7582 UFS_SENSE_SIZE, NULL, NULL,
fcbfffe2 7583 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
57d104c1
SJ
7584 if (ret)
7585 pr_err("%s: failed with err %d\n", __func__, ret);
7586
7587 kfree(buffer);
7588out:
7589 return ret;
7590}
7591
7592/**
7593 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7594 * power mode
7595 * @hba: per adapter instance
7596 * @pwr_mode: device power mode to set
7597 *
7598 * Returns 0 if requested power mode is set successfully
7599 * Returns non-zero if failed to set the requested power mode
7600 */
7601static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7602 enum ufs_dev_pwr_mode pwr_mode)
7603{
7604 unsigned char cmd[6] = { START_STOP };
7605 struct scsi_sense_hdr sshdr;
7c48bfd0
AM
7606 struct scsi_device *sdp;
7607 unsigned long flags;
57d104c1
SJ
7608 int ret;
7609
7c48bfd0
AM
7610 spin_lock_irqsave(hba->host->host_lock, flags);
7611 sdp = hba->sdev_ufs_device;
7612 if (sdp) {
7613 ret = scsi_device_get(sdp);
7614 if (!ret && !scsi_device_online(sdp)) {
7615 ret = -ENODEV;
7616 scsi_device_put(sdp);
7617 }
7618 } else {
7619 ret = -ENODEV;
7620 }
7621 spin_unlock_irqrestore(hba->host->host_lock, flags);
7622
7623 if (ret)
7624 return ret;
57d104c1
SJ
7625
7626 /*
7627 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7628 * handling, which would wait for host to be resumed. Since we know
7629 * we are functional while we are here, skip host resume in error
7630 * handling context.
7631 */
7632 hba->host->eh_noresume = 1;
7633 if (hba->wlun_dev_clr_ua) {
7634 ret = ufshcd_send_request_sense(hba, sdp);
7635 if (ret)
7636 goto out;
7637 /* Unit attention condition is cleared now */
7638 hba->wlun_dev_clr_ua = false;
7639 }
7640
7641 cmd[4] = pwr_mode << 4;
7642
7643 /*
7644 * This function is generally called from the power management
e8064021 7645 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
57d104c1
SJ
7646 * already suspended children.
7647 */
fcbfffe2
CH
7648 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7649 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
57d104c1
SJ
7650 if (ret) {
7651 sdev_printk(KERN_WARNING, sdp,
ef61329d
HR
7652 "START_STOP failed for power mode: %d, result %x\n",
7653 pwr_mode, ret);
c65be1a6 7654 if (driver_byte(ret) == DRIVER_SENSE)
21045519 7655 scsi_print_sense_hdr(sdp, NULL, &sshdr);
57d104c1
SJ
7656 }
7657
7658 if (!ret)
7659 hba->curr_dev_pwr_mode = pwr_mode;
7660out:
7c48bfd0 7661 scsi_device_put(sdp);
57d104c1
SJ
7662 hba->host->eh_noresume = 0;
7663 return ret;
7664}
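/*
 * Added illustrative note: the requested power mode is passed to the device
 * in the POWER CONDITION field (bits 7:4 of CDB byte 4) of the START STOP
 * UNIT command built above. Assuming the usual ufs_dev_pwr_mode encoding in
 * which UFS_SLEEP_PWR_MODE is 2, a sleep request ends up as
 * cmd[4] = 2 << 4 = 0x20 sent to the device well-known LU.
 */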
7665
7666static int ufshcd_link_state_transition(struct ufs_hba *hba,
7667 enum uic_link_state req_link_state,
7668 int check_for_bkops)
7669{
7670 int ret = 0;
7671
7672 if (req_link_state == hba->uic_link_state)
7673 return 0;
7674
7675 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7676 ret = ufshcd_uic_hibern8_enter(hba);
7677 if (!ret)
7678 ufshcd_set_link_hibern8(hba);
7679 else
7680 goto out;
7681 }
7682 /*
7683 * If autobkops is enabled, link can't be turned off because
7684 * turning off the link would also turn off the device.
7685 */
7686 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
dc30c9e6 7687 (!check_for_bkops || !hba->auto_bkops_enabled)) {
f3099fbd
YG
7688 /*
7689 * Let's make sure that the link is in low power mode; we currently
7690 * do this by putting the link in Hibern8. Another way to put the
7691 * link in low power mode is to send a DME end point reset to the
7692 * device and then send the DME reset command to the local
7693 * UniPro, but putting the link in Hibern8 is much faster.
7694 */
7695 ret = ufshcd_uic_hibern8_enter(hba);
7696 if (ret)
7697 goto out;
57d104c1
SJ
7698 /*
7699 * Change controller state to "reset state" which
7700 * should also put the link in off/reset state
7701 */
596585a2 7702 ufshcd_hba_stop(hba, true);
57d104c1
SJ
7703 /*
7704 * TODO: Check if we need any delay to make sure that
7705 * controller is reset
7706 */
7707 ufshcd_set_link_off(hba);
7708 }
7709
7710out:
7711 return ret;
7712}
7713
7714static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7715{
b799fdf7
YG
7716 /*
7717 * It seems some UFS devices may keep drawing more than sleep current
7718 * (at least for 500us) from UFS rails (especially from the VCCQ rail).
7719 * To avoid this situation, add a 2ms delay before putting these UFS
7720 * rails in LPM mode.
7721 */
7722 if (!ufshcd_is_link_active(hba) &&
7723 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7724 usleep_range(2000, 2100);
7725
57d104c1
SJ
7726 /*
7727 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7728 * save some power.
7729 *
7730 * If UFS device and link is in OFF state, all power supplies (VCC,
7731 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7732 * required. If UFS link is inactive (Hibern8 or OFF state) and device
7733 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7734 *
7735 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
7736 * anyway in a low power state, which saves some power.
7737 */
7738 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7739 !hba->dev_info.is_lu_power_on_wp) {
7740 ufshcd_setup_vreg(hba, false);
7741 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7742 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7743 if (!ufshcd_is_link_active(hba)) {
7744 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7745 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7746 }
7747 }
7748}
7749
7750static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7751{
7752 int ret = 0;
7753
7754 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7755 !hba->dev_info.is_lu_power_on_wp) {
7756 ret = ufshcd_setup_vreg(hba, true);
7757 } else if (!ufshcd_is_ufs_dev_active(hba)) {
57d104c1
SJ
7758 if (!ret && !ufshcd_is_link_active(hba)) {
7759 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7760 if (ret)
7761 goto vcc_disable;
7762 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7763 if (ret)
7764 goto vccq_lpm;
7765 }
69d72ac8 7766 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
57d104c1
SJ
7767 }
7768 goto out;
7769
7770vccq_lpm:
7771 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7772vcc_disable:
7773 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7774out:
7775 return ret;
7776}
7777
7778static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7779{
7780 if (ufshcd_is_link_off(hba))
7781 ufshcd_setup_hba_vreg(hba, false);
7782}
7783
7784static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7785{
7786 if (ufshcd_is_link_off(hba))
7787 ufshcd_setup_hba_vreg(hba, true);
7788}
7789
7a3e97b0 7790/**
57d104c1 7791 * ufshcd_suspend - helper function for suspend operations
3b1d0580 7792 * @hba: per adapter instance
57d104c1
SJ
7793 * @pm_op: desired low power operation type
7794 *
7795 * This function will try to put the UFS device and link into low power
7796 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7797 * (System PM level).
7798 *
7799 * If this function is called during shutdown, it will make sure that
7800 * both the UFS device and the UFS link are powered off.
7a3e97b0 7801 *
57d104c1
SJ
7802 * NOTE: UFS device & link must be active before we enter in this function.
7803 *
7804 * Returns 0 for success and non-zero for failure
7a3e97b0 7805 */
57d104c1 7806static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 7807{
57d104c1
SJ
7808 int ret = 0;
7809 enum ufs_pm_level pm_lvl;
7810 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7811 enum uic_link_state req_link_state;
7812
7813 hba->pm_op_in_progress = 1;
7814 if (!ufshcd_is_shutdown_pm(pm_op)) {
7815 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7816 hba->rpm_lvl : hba->spm_lvl;
7817 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7818 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7819 } else {
7820 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7821 req_link_state = UIC_LINK_OFF_STATE;
7822 }
7823
7a3e97b0 7824 /*
57d104c1
SJ
7825 * If we can't transition into any of the low power modes
7826 * just gate the clocks.
7a3e97b0 7827 */
1ab27c9c
ST
7828 ufshcd_hold(hba, false);
7829 hba->clk_gating.is_suspended = true;
7830
401f1e44
SJ
7831 if (hba->clk_scaling.is_allowed) {
7832 cancel_work_sync(&hba->clk_scaling.suspend_work);
7833 cancel_work_sync(&hba->clk_scaling.resume_work);
7834 ufshcd_suspend_clkscaling(hba);
7835 }
d6fcf81a 7836
57d104c1
SJ
7837 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7838 req_link_state == UIC_LINK_ACTIVE_STATE) {
7839 goto disable_clks;
7840 }
7a3e97b0 7841
57d104c1
SJ
7842 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7843 (req_link_state == hba->uic_link_state))
d6fcf81a 7844 goto enable_gating;
57d104c1
SJ
7845
7846 /* UFS device & link must be active before we enter in this function */
7847 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7848 ret = -EINVAL;
d6fcf81a 7849 goto enable_gating;
57d104c1
SJ
7850 }
7851
7852 if (ufshcd_is_runtime_pm(pm_op)) {
374a246e
SJ
7853 if (ufshcd_can_autobkops_during_suspend(hba)) {
7854 /*
7855 * The device is idle with no requests in the queue;
7856 * allow background operations if the bkops status shows
7857 * that performance might be impacted.
7858 */
7859 ret = ufshcd_urgent_bkops(hba);
7860 if (ret)
7861 goto enable_gating;
7862 } else {
7863 /* make sure that auto bkops is disabled */
7864 ufshcd_disable_auto_bkops(hba);
7865 }
57d104c1
SJ
7866 }
7867
7868 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7869 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7870 !ufshcd_is_runtime_pm(pm_op))) {
7871 /* ensure that bkops is disabled */
7872 ufshcd_disable_auto_bkops(hba);
7873 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7874 if (ret)
1ab27c9c 7875 goto enable_gating;
57d104c1
SJ
7876 }
7877
7878 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7879 if (ret)
7880 goto set_dev_active;
7881
7882 ufshcd_vreg_set_lpm(hba);
7883
7884disable_clks:
7885 /*
7886 * Call vendor specific suspend callback. As these callbacks may access
7887 * vendor specific host controller register space, call them while the
7888 * host clocks are still ON.
7889 */
0263bcd0
YG
7890 ret = ufshcd_vops_suspend(hba, pm_op);
7891 if (ret)
7892 goto set_link_active;
dcb6cec5
SC
7893 /*
7894 * Disable the host irq as there won't be any host controller
7895 * transaction expected till resume.
7896 */
7897 ufshcd_disable_irq(hba);
57d104c1 7898
57d104c1
SJ
7899 if (!ufshcd_is_link_active(hba))
7900 ufshcd_setup_clocks(hba, false);
7901 else
7902 /* If link is active, device ref_clk can't be switched off */
7903 __ufshcd_setup_clocks(hba, false, true);
7904
1ab27c9c 7905 hba->clk_gating.state = CLKS_OFF;
7ff5ab47 7906 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
dcb6cec5 7907
57d104c1
SJ
7908 /* Put the host controller in low power mode if possible */
7909 ufshcd_hba_vreg_set_lpm(hba);
7910 goto out;
7911
57d104c1 7912set_link_active:
401f1e44
SJ
7913 if (hba->clk_scaling.is_allowed)
7914 ufshcd_resume_clkscaling(hba);
57d104c1
SJ
7915 ufshcd_vreg_set_hpm(hba);
7916 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7917 ufshcd_set_link_active(hba);
7918 else if (ufshcd_is_link_off(hba))
7919 ufshcd_host_reset_and_restore(hba);
7920set_dev_active:
7921 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7922 ufshcd_disable_auto_bkops(hba);
1ab27c9c 7923enable_gating:
401f1e44
SJ
7924 if (hba->clk_scaling.is_allowed)
7925 ufshcd_resume_clkscaling(hba);
1ab27c9c
ST
7926 hba->clk_gating.is_suspended = false;
7927 ufshcd_release(hba);
57d104c1
SJ
7928out:
7929 hba->pm_op_in_progress = 0;
8808b4e9
SC
7930 if (ret)
7931 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
57d104c1 7932 return ret;
7a3e97b0
SY
7933}
7934
7935/**
57d104c1 7936 * ufshcd_resume - helper function for resume operations
3b1d0580 7937 * @hba: per adapter instance
57d104c1 7938 * @pm_op: runtime PM or system PM
7a3e97b0 7939 *
57d104c1
SJ
7940 * This function basically brings the UFS device, UniPro link and controller
7941 * to active state.
7942 *
7943 * Returns 0 for success and non-zero for failure
7a3e97b0 7944 */
57d104c1 7945static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 7946{
57d104c1
SJ
7947 int ret;
7948 enum uic_link_state old_link_state;
7949
7950 hba->pm_op_in_progress = 1;
7951 old_link_state = hba->uic_link_state;
7952
7953 ufshcd_hba_vreg_set_hpm(hba);
7954 /* Make sure clocks are enabled before accessing controller */
7955 ret = ufshcd_setup_clocks(hba, true);
7956 if (ret)
7957 goto out;
7958
57d104c1 7959 /* enable the host irq as host controller would be active soon */
5231d38c 7960 ufshcd_enable_irq(hba);
57d104c1
SJ
7961
7962 ret = ufshcd_vreg_set_hpm(hba);
7963 if (ret)
7964 goto disable_irq_and_vops_clks;
7965
7a3e97b0 7966 /*
57d104c1
SJ
7967 * Call vendor specific resume callback. As these callbacks may access
7968 * vendor specific host controller register space, call them when the
7969 * host clocks are ON.
7a3e97b0 7970 */
0263bcd0
YG
7971 ret = ufshcd_vops_resume(hba, pm_op);
7972 if (ret)
7973 goto disable_vreg;
57d104c1
SJ
7974
7975 if (ufshcd_is_link_hibern8(hba)) {
7976 ret = ufshcd_uic_hibern8_exit(hba);
7977 if (!ret)
7978 ufshcd_set_link_active(hba);
7979 else
7980 goto vendor_suspend;
7981 } else if (ufshcd_is_link_off(hba)) {
7982 ret = ufshcd_host_reset_and_restore(hba);
7983 /*
7984 * ufshcd_host_reset_and_restore() should have already
7985 * set the link state as active
7986 */
7987 if (ret || !ufshcd_is_link_active(hba))
7988 goto vendor_suspend;
7989 }
7990
7991 if (!ufshcd_is_ufs_dev_active(hba)) {
7992 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7993 if (ret)
7994 goto set_old_link_state;
7995 }
7996
4e768e76
SJ
7997 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7998 ufshcd_enable_auto_bkops(hba);
7999 else
8000 /*
8001 * If BKOPs operations are urgently needed at this moment then
8002 * keep auto-bkops enabled or else disable it.
8003 */
8004 ufshcd_urgent_bkops(hba);
8005
1ab27c9c
ST
8006 hba->clk_gating.is_suspended = false;
8007
fcb0c4b0
ST
8008 if (hba->clk_scaling.is_allowed)
8009 ufshcd_resume_clkscaling(hba);
856b3483 8010
ad448378
AH
8011 /* Enable Auto-Hibernate if configured */
8012 ufshcd_auto_hibern8_enable(hba);
8013
71d848b8
CG
8014 /* Schedule clock gating in case of no access to UFS device yet */
8015 ufshcd_release(hba);
8016
57d104c1
SJ
8017 goto out;
8018
8019set_old_link_state:
8020 ufshcd_link_state_transition(hba, old_link_state, 0);
8021vendor_suspend:
0263bcd0 8022 ufshcd_vops_suspend(hba, pm_op);
57d104c1
SJ
8023disable_vreg:
8024 ufshcd_vreg_set_lpm(hba);
8025disable_irq_and_vops_clks:
8026 ufshcd_disable_irq(hba);
401f1e44
SJ
8027 if (hba->clk_scaling.is_allowed)
8028 ufshcd_suspend_clkscaling(hba);
57d104c1
SJ
8029 ufshcd_setup_clocks(hba, false);
8030out:
8031 hba->pm_op_in_progress = 0;
8808b4e9
SC
8032 if (ret)
8033 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
57d104c1
SJ
8034 return ret;
8035}
8036
8037/**
8038 * ufshcd_system_suspend - system suspend routine
8039 * @hba: per adapter instance
57d104c1
SJ
8040 *
8041 * Check the description of ufshcd_suspend() function for more details.
8042 *
8043 * Returns 0 for success and non-zero for failure
8044 */
8045int ufshcd_system_suspend(struct ufs_hba *hba)
8046{
8047 int ret = 0;
7ff5ab47 8048 ktime_t start = ktime_get();
57d104c1
SJ
8049
8050 if (!hba || !hba->is_powered)
233b594b 8051 return 0;
57d104c1 8052
0b257734
SJ
8053 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
8054 hba->curr_dev_pwr_mode) &&
8055 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
8056 hba->uic_link_state))
8057 goto out;
57d104c1 8058
0b257734 8059 if (pm_runtime_suspended(hba->dev)) {
57d104c1
SJ
8060 /*
8061 * UFS device and/or UFS link low power states during runtime
8062 * suspend seem to be different from what is expected during
8063 * system suspend. Hence runtime resume the device & link and
8064 * let the system suspend low power states take effect.
8065 * TODO: If resume takes a long time, we might optimize it in
8066 * the future by not resuming everything if possible.
8067 */
8068 ret = ufshcd_runtime_resume(hba);
8069 if (ret)
8070 goto out;
8071 }
8072
8073 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
8074out:
7ff5ab47
SJ
8075 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
8076 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8077 hba->curr_dev_pwr_mode, hba->uic_link_state);
e785060e
DR
8078 if (!ret)
8079 hba->is_sys_suspended = true;
57d104c1
SJ
8080 return ret;
8081}
8082EXPORT_SYMBOL(ufshcd_system_suspend);
8083
8084/**
8085 * ufshcd_system_resume - system resume routine
8086 * @hba: per adapter instance
8087 *
8088 * Returns 0 for success and non-zero for failure
8089 */
7a3e97b0 8090
57d104c1
SJ
8091int ufshcd_system_resume(struct ufs_hba *hba)
8092{
7ff5ab47
SJ
8093 int ret = 0;
8094 ktime_t start = ktime_get();
8095
e3ce73d6
YG
8096 if (!hba)
8097 return -EINVAL;
8098
8099 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
57d104c1
SJ
8100 /*
8101 * Let the runtime resume take care of resuming
8102 * if runtime suspended.
8103 */
7ff5ab47
SJ
8104 goto out;
8105 else
8106 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8107out:
8108 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8109 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8110 hba->curr_dev_pwr_mode, hba->uic_link_state);
ce9e7bce
SC
8111 if (!ret)
8112 hba->is_sys_suspended = false;
7ff5ab47 8113 return ret;
7a3e97b0 8114}
57d104c1 8115EXPORT_SYMBOL(ufshcd_system_resume);
3b1d0580 8116
57d104c1
SJ
8117/**
8118 * ufshcd_runtime_suspend - runtime suspend routine
8119 * @hba: per adapter instance
8120 *
8121 * Check the description of ufshcd_suspend() function for more details.
8122 *
8123 * Returns 0 for success and non-zero for failure
8124 */
66ec6d59
SRT
8125int ufshcd_runtime_suspend(struct ufs_hba *hba)
8126{
7ff5ab47
SJ
8127 int ret = 0;
8128 ktime_t start = ktime_get();
8129
e3ce73d6
YG
8130 if (!hba)
8131 return -EINVAL;
8132
8133 if (!hba->is_powered)
7ff5ab47
SJ
8134 goto out;
8135 else
8136 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8137out:
8138 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8139 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8140 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 8141 return ret;
66ec6d59
SRT
8142}
8143EXPORT_SYMBOL(ufshcd_runtime_suspend);
8144
57d104c1
SJ
8145/**
8146 * ufshcd_runtime_resume - runtime resume routine
8147 * @hba: per adapter instance
8148 *
8149 * This function basically brings the UFS device, UniPro link and controller
8150 * to active state. Following operations are done in this function:
8151 *
8152 * 1. Turn on all the controller related clocks
8153 * 2. Bring the UniPro link out of Hibernate state
8154 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8155 * to active state.
8156 * 4. If auto-bkops is enabled on the device, disable it.
8157 *
8158 * So following would be the possible power state after this function return
8159 * successfully:
8160 * S1: UFS device in Active state with VCC rail ON
8161 * UniPro link in Active state
8162 * All the UFS/UniPro controller clocks are ON
8163 *
8164 * Returns 0 for success and non-zero for failure
8165 */
66ec6d59
SRT
8166int ufshcd_runtime_resume(struct ufs_hba *hba)
8167{
7ff5ab47
SJ
8168 int ret = 0;
8169 ktime_t start = ktime_get();
8170
e3ce73d6
YG
8171 if (!hba)
8172 return -EINVAL;
8173
8174 if (!hba->is_powered)
7ff5ab47
SJ
8175 goto out;
8176 else
8177 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8178out:
8179 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8180 ktime_to_us(ktime_sub(ktime_get(), start)),
73eba2be 8181 hba->curr_dev_pwr_mode, hba->uic_link_state);
7ff5ab47 8182 return ret;
66ec6d59
SRT
8183}
8184EXPORT_SYMBOL(ufshcd_runtime_resume);
8185
8186int ufshcd_runtime_idle(struct ufs_hba *hba)
8187{
8188 return 0;
8189}
8190EXPORT_SYMBOL(ufshcd_runtime_idle);
8191
57d104c1
SJ
8192/**
8193 * ufshcd_shutdown - shutdown routine
8194 * @hba: per adapter instance
8195 *
8196 * This function would power off both UFS device and UFS link.
8197 *
8198 * Returns 0 always to allow force shutdown even in case of errors.
8199 */
8200int ufshcd_shutdown(struct ufs_hba *hba)
8201{
8202 int ret = 0;
8203
f51913ee
SC
8204 if (!hba->is_powered)
8205 goto out;
8206
57d104c1
SJ
8207 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8208 goto out;
8209
8210 if (pm_runtime_suspended(hba->dev)) {
8211 ret = ufshcd_runtime_resume(hba);
8212 if (ret)
8213 goto out;
8214 }
8215
8216 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8217out:
8218 if (ret)
8219 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8220 /* allow force shutdown even in case of errors */
8221 return 0;
8222}
8223EXPORT_SYMBOL(ufshcd_shutdown);
8224
7a3e97b0 8225/**
3b1d0580 8226 * ufshcd_remove - de-allocate SCSI host and host memory space
7a3e97b0 8227 * data structure memory
8aa29f19 8228 * @hba: per adapter instance
7a3e97b0 8229 */
3b1d0580 8230void ufshcd_remove(struct ufs_hba *hba)
7a3e97b0 8231{
df032bf2 8232 ufs_bsg_remove(hba);
cbb6813e 8233 ufs_sysfs_remove_nodes(hba->dev);
69a6c269
BVA
8234 blk_cleanup_queue(hba->tmf_queue);
8235 blk_mq_free_tag_set(&hba->tmf_tag_set);
7252a360 8236 blk_cleanup_queue(hba->cmd_queue);
cfdf9c91 8237 scsi_remove_host(hba->host);
7a3e97b0 8238 /* disable interrupts */
2fbd009b 8239 ufshcd_disable_intr(hba, hba->intr_mask);
596585a2 8240 ufshcd_hba_stop(hba, true);
7a3e97b0 8241
eebcc196 8242 ufshcd_exit_clk_scaling(hba);
1ab27c9c 8243 ufshcd_exit_clk_gating(hba);
fcb0c4b0
ST
8244 if (ufshcd_is_clkscaling_supported(hba))
8245 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
aa497613 8246 ufshcd_hba_exit(hba);
3b1d0580
VH
8247}
8248EXPORT_SYMBOL_GPL(ufshcd_remove);
8249
47555a5c
YG
8250/**
8251 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8252 * @hba: pointer to Host Bus Adapter (HBA)
8253 */
8254void ufshcd_dealloc_host(struct ufs_hba *hba)
8255{
8256 scsi_host_put(hba->host);
8257}
8258EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8259
ca3d7bf9
AM
8260/**
8261 * ufshcd_set_dma_mask - Set dma mask based on the controller
8262 * addressing capability
8263 * @hba: per adapter instance
8264 *
8265 * Returns 0 for success, non-zero for failure
8266 */
8267static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8268{
8269 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8270 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8271 return 0;
8272 }
8273 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8274}
8275
7a3e97b0 8276/**
5c0c28a8 8277 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3b1d0580
VH
8278 * @dev: pointer to device handle
8279 * @hba_handle: driver private handle
7a3e97b0
SY
8280 * Returns 0 on success, non-zero value on failure
8281 */
5c0c28a8 8282int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7a3e97b0
SY
8283{
8284 struct Scsi_Host *host;
8285 struct ufs_hba *hba;
5c0c28a8 8286 int err = 0;
7a3e97b0 8287
3b1d0580
VH
8288 if (!dev) {
8289 dev_err(dev,
8290 "Invalid memory reference for dev is NULL\n");
8291 err = -ENODEV;
7a3e97b0
SY
8292 goto out_error;
8293 }
8294
7a3e97b0
SY
8295 host = scsi_host_alloc(&ufshcd_driver_template,
8296 sizeof(struct ufs_hba));
8297 if (!host) {
3b1d0580 8298 dev_err(dev, "scsi_host_alloc failed\n");
7a3e97b0 8299 err = -ENOMEM;
3b1d0580 8300 goto out_error;
7a3e97b0
SY
8301 }
8302 hba = shost_priv(host);
7a3e97b0 8303 hba->host = host;
3b1d0580 8304 hba->dev = dev;
5c0c28a8 8305 *hba_handle = hba;
9e1e8a75 8306 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
5c0c28a8 8307
566ec9ad
SM
8308 INIT_LIST_HEAD(&hba->clk_list_head);
8309
5c0c28a8
SRT
8310out_error:
8311 return err;
8312}
8313EXPORT_SYMBOL(ufshcd_alloc_host);
8314
69a6c269
BVA
8315/* This function exists because blk_mq_alloc_tag_set() requires this. */
8316static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
8317 const struct blk_mq_queue_data *qd)
8318{
8319 WARN_ON_ONCE(true);
8320 return BLK_STS_NOTSUPP;
8321}
8322
8323static const struct blk_mq_ops ufshcd_tmf_ops = {
8324 .queue_rq = ufshcd_queue_tmf,
8325};
8326
5c0c28a8
SRT
8327/**
8328 * ufshcd_init - Driver initialization routine
8329 * @hba: per-adapter instance
8330 * @mmio_base: base register address
8331 * @irq: Interrupt line of device
8332 * Returns 0 on success, non-zero value on failure
8333 */
8334int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8335{
8336 int err;
8337 struct Scsi_Host *host = hba->host;
8338 struct device *dev = hba->dev;
8339
8340 if (!mmio_base) {
8341 dev_err(hba->dev,
8342 "Invalid memory reference for mmio_base is NULL\n");
8343 err = -ENODEV;
8344 goto out_error;
8345 }
8346
3b1d0580
VH
8347 hba->mmio_base = mmio_base;
8348 hba->irq = irq;
7a3e97b0 8349
aa497613 8350 err = ufshcd_hba_init(hba);
5c0c28a8
SRT
8351 if (err)
8352 goto out_error;
8353
7a3e97b0
SY
8354 /* Read capabilities registers */
8355 ufshcd_hba_capabilities(hba);
8356
8357 /* Get UFS version supported by the controller */
8358 hba->ufs_version = ufshcd_get_ufs_version(hba);
8359
c01848c6
YG
8360 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8361 (hba->ufs_version != UFSHCI_VERSION_11) &&
8362 (hba->ufs_version != UFSHCI_VERSION_20) &&
8363 (hba->ufs_version != UFSHCI_VERSION_21))
8364 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8365 hba->ufs_version);
8366
2fbd009b
SJ
8367 /* Get Interrupt bit mask per version */
8368 hba->intr_mask = ufshcd_get_intr_mask(hba);
8369
ca3d7bf9
AM
8370 err = ufshcd_set_dma_mask(hba);
8371 if (err) {
8372 dev_err(hba->dev, "set dma mask failed\n");
8373 goto out_disable;
8374 }
8375
7a3e97b0
SY
8376 /* Allocate memory for host memory space */
8377 err = ufshcd_memory_alloc(hba);
8378 if (err) {
3b1d0580
VH
8379 dev_err(hba->dev, "Memory allocation failed\n");
8380 goto out_disable;
7a3e97b0
SY
8381 }
8382
8383 /* Configure LRB */
8384 ufshcd_host_memory_configure(hba);
8385
8386 host->can_queue = hba->nutrs;
8387 host->cmd_per_lun = hba->nutrs;
8388 host->max_id = UFSHCD_MAX_ID;
0ce147d4 8389 host->max_lun = UFS_MAX_LUNS;
7a3e97b0
SY
8390 host->max_channel = UFSHCD_MAX_CHANNEL;
8391 host->unique_id = host->host_no;
a851b2bd 8392 host->max_cmd_len = UFS_CDB_SIZE;
7a3e97b0 8393
7eb584db
DR
8394 hba->max_pwr_info.is_valid = false;
8395
7a3e97b0 8396 /* Initialize work queues */
e8e7f271 8397 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
66ec6d59 8398 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7a3e97b0 8399
6ccf44fe
SJ
8400 /* Initialize UIC command mutex */
8401 mutex_init(&hba->uic_cmd_mutex);
8402
5a0b0cb9
SRT
8403 /* Initialize mutex for device management commands */
8404 mutex_init(&hba->dev_cmd.lock);
8405
a3cd5ec5
SJ
8406 init_rwsem(&hba->clk_scaling_lock);
8407
1ab27c9c 8408 ufshcd_init_clk_gating(hba);
199ef13c 8409
eebcc196
VG
8410 ufshcd_init_clk_scaling(hba);
8411
199ef13c
YG
8412 /*
8413 * In order to avoid any spurious interrupt immediately after
8414 * registering UFS controller interrupt handler, clear any pending UFS
8415 * interrupt status and disable all the UFS interrupts.
8416 */
8417 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8418 REG_INTERRUPT_STATUS);
8419 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8420 /*
8421 * Make sure that UFS interrupts are disabled and any pending interrupt
8422 * status is cleared before registering UFS interrupt handler.
8423 */
8424 mb();
8425
7a3e97b0 8426 /* IRQ registration */
2953f850 8427 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7a3e97b0 8428 if (err) {
3b1d0580 8429 dev_err(hba->dev, "request irq failed\n");
1ab27c9c 8430 goto exit_gating;
57d104c1
SJ
8431 } else {
8432 hba->is_irq_enabled = true;
7a3e97b0
SY
8433 }
8434
3b1d0580 8435 err = scsi_add_host(host, hba->dev);
7a3e97b0 8436 if (err) {
3b1d0580 8437 dev_err(hba->dev, "scsi_add_host failed\n");
1ab27c9c 8438 goto exit_gating;
7a3e97b0
SY
8439 }
8440
7252a360
BVA
8441 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
8442 if (IS_ERR(hba->cmd_queue)) {
8443 err = PTR_ERR(hba->cmd_queue);
8444 goto out_remove_scsi_host;
8445 }
8446
69a6c269
BVA
8447 hba->tmf_tag_set = (struct blk_mq_tag_set) {
8448 .nr_hw_queues = 1,
8449 .queue_depth = hba->nutmrs,
8450 .ops = &ufshcd_tmf_ops,
8451 .flags = BLK_MQ_F_NO_SCHED,
8452 };
8453 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
8454 if (err < 0)
8455 goto free_cmd_queue;
8456 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
8457 if (IS_ERR(hba->tmf_queue)) {
8458 err = PTR_ERR(hba->tmf_queue);
8459 goto free_tmf_tag_set;
8460 }
8461
d8d9f793
BA
8462 /* Reset the attached device */
8463 ufshcd_vops_device_reset(hba);
8464
6ccf44fe
SJ
8465 /* Host controller enable */
8466 err = ufshcd_hba_enable(hba);
7a3e97b0 8467 if (err) {
6ccf44fe 8468 dev_err(hba->dev, "Host controller enable failed\n");
66cc820f 8469 ufshcd_print_host_regs(hba);
6ba65588 8470 ufshcd_print_host_state(hba);
69a6c269 8471 goto free_tmf_queue;
7a3e97b0 8472 }
6ccf44fe 8473
0c8f7586
SJ
8474 /*
8475 * Set the default power management level for runtime and system PM.
8476 * Default power saving mode is to keep UFS link in Hibern8 state
8477 * and UFS device in sleep state.
8478 */
8479 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8480 UFS_SLEEP_PWR_MODE,
8481 UIC_LINK_HIBERN8_STATE);
8482 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8483 UFS_SLEEP_PWR_MODE,
8484 UIC_LINK_HIBERN8_STATE);
8485
ad448378 8486 /* Set the default auto-hibernate idle timer value to 150 ms */
f571b377 8487 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
ad448378
AH
8488 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8489 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8490 }
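	/*
	 * Added illustrative note (field layout assumed from the UFSHCI
	 * auto-hibernate idle timer definition): the register combines a
	 * timer value field with a scale field; with scale 3 interpreted as
	 * 1 ms per tick, the value programmed above corresponds to
	 * 150 * 1 ms = 150 ms of idle time before the link enters Hibern8.
	 */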
8491
62694735
SRT
8492 /* Hold auto suspend until async scan completes */
8493 pm_runtime_get_sync(dev);
38135535 8494 atomic_set(&hba->scsi_block_reqs_cnt, 0);
57d104c1 8495 /*
7caf489b
SJ
8496 * We are assuming that device wasn't put in sleep/power-down
8497 * state exclusively during the boot stage before kernel.
8498 * This assumption helps avoid doing link startup twice during
8499 * ufshcd_probe_hba().
57d104c1 8500 */
7caf489b 8501 ufshcd_set_ufs_dev_active(hba);
57d104c1 8502
6ccf44fe 8503 async_schedule(ufshcd_async_scan, hba);
cbb6813e 8504 ufs_sysfs_add_nodes(hba->dev);
6ccf44fe 8505
7a3e97b0
SY
8506 return 0;
8507
69a6c269
BVA
8508free_tmf_queue:
8509 blk_cleanup_queue(hba->tmf_queue);
8510free_tmf_tag_set:
8511 blk_mq_free_tag_set(&hba->tmf_tag_set);
7252a360
BVA
8512free_cmd_queue:
8513 blk_cleanup_queue(hba->cmd_queue);
3b1d0580
VH
8514out_remove_scsi_host:
8515 scsi_remove_host(hba->host);
1ab27c9c 8516exit_gating:
eebcc196 8517 ufshcd_exit_clk_scaling(hba);
1ab27c9c 8518 ufshcd_exit_clk_gating(hba);
3b1d0580 8519out_disable:
57d104c1 8520 hba->is_irq_enabled = false;
aa497613 8521 ufshcd_hba_exit(hba);
3b1d0580
VH
8522out_error:
8523 return err;
8524}
8525EXPORT_SYMBOL_GPL(ufshcd_init);
8526
3b1d0580
VH
8527MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8528MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
e0eca63e 8529MODULE_DESCRIPTION("Generic UFS host controller driver Core");
7a3e97b0
SY
8530MODULE_LICENSE("GPL");
8531MODULE_VERSION(UFSHCD_DRIVER_VERSION);