/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})

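/*
 * Per-descriptor maximum sizes, indexed by enum desc_idn (see
 * ufshcd_read_desc_param() below), so the entries must stay in IDN order:
 * device, configuration, unit, RFU, interconnect, string, RFU, geometry,
 * power, RFU.
 */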
static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

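		/*
		 * Re-read the register once after the deadline so that a
		 * value which changed during the final sleep is not
		 * reported as a timeout.
		 */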
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

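	/*
	 * find_first_zero_bit() and test_and_set_bit_lock() are not atomic
	 * as a pair, so keep looping until a slot is actually claimed.
	 */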
	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 */
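	/*
	 * The expression below is zero only when UTRLRDY, UTMRLRDY and
	 * UCRDY are all set and none of the error/reserved bits (4-7)
	 * are set.
	 */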
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates to
 *			the host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}

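/*
 * Clock gating state machine, as implemented by the functions above and
 * below: ufshcd_release() moves CLKS_ON to REQ_CLKS_OFF and schedules
 * gate_work(), which gates the clocks and sets CLKS_OFF; ufshcd_hold()
 * moves CLKS_OFF to REQ_CLKS_ON and schedules ungate_work(), or cancels a
 * pending REQ_CLKS_OFF back to CLKS_ON.
 */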
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. This way, we keep
	 * the state machine intact and ultimately prevent doing the
	 * cancel work multiple times when there are new requests
	 * arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except for the locking: must be called
 * with the mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
 * descriptor according to request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
			ret = -EINVAL;
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufshcd_prepare_utp_nop_upiu(lrbp);
		else
			ret = -EINVAL;
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: UFS native command are not supported\n",
			__func__);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
				__func__, lrbp->command_type);
		break;
	} /* end of switch */

	return ret;
}

/*
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @scsi_lun: UPIU W-LUN id
 *
 * Returns SCSI W-LUN id
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @cmd: command from SCSI Midlayer
 * @done: call back function
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	err = ufshcd_hold(hba, true);
	if (err) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}
	WARN_ON(hba->clk_gating.state != CLKS_ON);

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * hba: per-adapter instance
 * query_opcode: flag query to perform
 * idn: flag idn to access
 * flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
	return err;
}

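/*
 * Illustrative use of ufshcd_query_flag() (a sketch, not part of this file):
 * setting and then reading back the fDeviceInit flag during device
 * initialization, assuming the QUERY_FLAG_IDN_FDEVICEINIT idn from ufs.h.
 *
 *	bool flag_res = true;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	if (!err)
 *		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */
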
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * hba: per-adapter instance
 * opcode: attribute opcode
 * idn: attribute idn to access
 * index: index field
 * selector: selector field
 * attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
*/
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	ufshcd_release(hba);
	return err;
}

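/*
 * Illustrative use of ufshcd_query_attr() (a sketch, not part of this file):
 * reading the bBackgroundOpStatus attribute, assuming the
 * QUERY_ATTR_IDN_BKOPS_STATUS idn from ufs.h.
 *
 *	u32 bkops_status;
 *	int err;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				&bkops_status);
 */
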
d44a5f98
DR
1717/**
1718 * ufshcd_query_descriptor - API function for sending descriptor requests
1719 * hba: per-adapter instance
1720 * opcode: attribute opcode
1721 * idn: attribute idn to access
1722 * index: index field
1723 * selector: selector field
1724 * desc_buf: the buffer that contains the descriptor
1725 * buf_len: length parameter passed to the device
1726 *
1727 * Returns 0 for success, non-zero in case of failure.
1728 * The buf_len parameter will contain, on return, the length parameter
1729 * received on the response.
1730 */
7289f983 1731static int ufshcd_query_descriptor(struct ufs_hba *hba,
d44a5f98
DR
1732 enum query_opcode opcode, enum desc_idn idn, u8 index,
1733 u8 selector, u8 *desc_buf, int *buf_len)
1734{
1735 struct ufs_query_req *request = NULL;
1736 struct ufs_query_res *response = NULL;
1737 int err;
1738
1739 BUG_ON(!hba);
1740
1ab27c9c 1741 ufshcd_hold(hba, false);
d44a5f98
DR
1742 if (!desc_buf) {
1743 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1744 __func__, opcode);
1745 err = -EINVAL;
1746 goto out;
1747 }
1748
1749 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1750 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1751 __func__, *buf_len);
1752 err = -EINVAL;
1753 goto out;
1754 }
1755
1756 mutex_lock(&hba->dev_cmd.lock);
1757 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1758 selector);
1759 hba->dev_cmd.query.descriptor = desc_buf;
ea2aab24 1760 request->upiu_req.length = cpu_to_be16(*buf_len);
d44a5f98
DR
1761
1762 switch (opcode) {
1763 case UPIU_QUERY_OPCODE_WRITE_DESC:
1764 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1765 break;
1766 case UPIU_QUERY_OPCODE_READ_DESC:
1767 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1768 break;
1769 default:
1770 dev_err(hba->dev,
1771 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1772 __func__, opcode);
1773 err = -EINVAL;
1774 goto out_unlock;
1775 }
1776
1777 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1778
1779 if (err) {
1780 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1781 __func__, opcode, idn, err);
1782 goto out_unlock;
1783 }
1784
1785 hba->dev_cmd.query.descriptor = NULL;
ea2aab24 1786 *buf_len = be16_to_cpu(response->upiu_res.length);
d44a5f98
DR
1787
1788out_unlock:
1789 mutex_unlock(&hba->dev_cmd.lock);
1790out:
1ab27c9c 1791 ufshcd_release(hba);
d44a5f98
DR
1792 return err;
1793}
1794
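/*
 * Illustrative sketch only, not part of the driver: reading a whole
 * descriptor, here the device descriptor, assuming QUERY_DESC_IDN_DEVICE and
 * the ufs_query_desc_max_size[] table from earlier in this file. buf_len is
 * updated with the length actually reported by the device. (The helper name
 * below is hypothetical.)
 */
static int ufshcd_example_read_device_desc(struct ufs_hba *hba, u8 *buf)
{
	int buf_len = ufs_query_desc_max_size[QUERY_DESC_IDN_DEVICE];

	return ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
			QUERY_DESC_IDN_DEVICE, 0, 0, buf, &buf_len);
}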
da461cec
SJ
1795/**
1796 * ufshcd_read_desc_param - read the specified descriptor parameter
1797 * @hba: Pointer to adapter instance
1798 * @desc_id: descriptor idn value
1799 * @desc_index: descriptor index
1800 * @param_offset: offset of the parameter to read
1801 * @param_read_buf: pointer to buffer where parameter would be read
1802 * @param_size: sizeof(param_read_buf)
1803 *
1804 * Return 0 in case of success, non-zero otherwise
1805 */
1806static int ufshcd_read_desc_param(struct ufs_hba *hba,
1807 enum desc_idn desc_id,
1808 int desc_index,
1809 u32 param_offset,
1810 u8 *param_read_buf,
1811 u32 param_size)
1812{
1813 int ret;
1814 u8 *desc_buf;
1815 u32 buff_len;
1816 bool is_kmalloc = true;
1817
1818 /* safety checks */
1819 if (desc_id >= QUERY_DESC_IDN_MAX)
1820 return -EINVAL;
1821
1822 buff_len = ufs_query_desc_max_size[desc_id];
1823 if ((param_offset + param_size) > buff_len)
1824 return -EINVAL;
1825
1826 if (!param_offset && (param_size == buff_len)) {
1827 /* memory space already available to hold full descriptor */
1828 desc_buf = param_read_buf;
1829 is_kmalloc = false;
1830 } else {
1831 /* allocate memory to hold full descriptor */
1832 desc_buf = kmalloc(buff_len, GFP_KERNEL);
1833 if (!desc_buf)
1834 return -ENOMEM;
1835 }
1836
1837 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
1838 desc_id, desc_index, 0, desc_buf,
1839 &buff_len);
1840
1841 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
1842 (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
1843 ufs_query_desc_max_size[desc_id])
1844 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
1845 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
1846 __func__, desc_id, param_offset, buff_len, ret);
1847 if (!ret)
1848 ret = -EINVAL;
1849
1850 goto out;
1851 }
1852
1853 if (is_kmalloc)
1854 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1855out:
1856 if (is_kmalloc)
1857 kfree(desc_buf);
1858 return ret;
1859}
1860
1861static inline int ufshcd_read_desc(struct ufs_hba *hba,
1862 enum desc_idn desc_id,
1863 int desc_index,
1864 u8 *buf,
1865 u32 size)
1866{
1867 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1868}
1869
1870static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
1871 u8 *buf,
1872 u32 size)
1873{
1874 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
1875}
1876
1877/**
1878 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
1879 * @hba: Pointer to adapter instance
1880 * @lun: lun id
1881 * @param_offset: offset of the parameter to read
1882 * @param_read_buf: pointer to buffer where parameter would be read
1883 * @param_size: sizeof(param_read_buf)
1884 *
1885 * Return 0 in case of success, non-zero otherwise
1886 */
1887static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
1888 int lun,
1889 enum unit_desc_param param_offset,
1890 u8 *param_read_buf,
1891 u32 param_size)
1892{
1893 /*
1894 * Unit descriptors are only available for general purpose LUs (LUN id
1895 * from 0 to 7) and RPMB Well known LU.
1896 */
0ce147d4 1897 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
da461cec
SJ
1898 return -EOPNOTSUPP;
1899
1900 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
1901 param_offset, param_read_buf, param_size);
1902}
1903
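/*
 * Illustrative sketch only, not part of the driver: reading one byte-sized
 * unit descriptor parameter for a SCSI LUN. ufshcd_scsi_to_upiu_lun() (used
 * elsewhere in this file) converts the SCSI LUN into the UPIU LUN that is
 * used as the unit descriptor index. (The helper name below is hypothetical.)
 */
static int ufshcd_example_read_lu_queue_depth(struct ufs_hba *hba,
					      struct scsi_device *sdev,
					      u8 *qdepth)
{
	return ufshcd_read_unit_desc_param(hba,
			ufshcd_scsi_to_upiu_lun(sdev->lun),
			UNIT_DESC_PARAM_LU_Q_DEPTH,
			qdepth, sizeof(*qdepth));
}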
7a3e97b0
SY
1904/**
1905 * ufshcd_memory_alloc - allocate memory for host memory space data structures
1906 * @hba: per adapter instance
1907 *
1908 * 1. Allocate DMA memory for Command Descriptor array
1909 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
1910 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
1911 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
1912 * (UTMRDL)
1913 * 4. Allocate memory for local reference block(lrb).
1914 *
1915 * Returns 0 for success, non-zero in case of failure
1916 */
1917static int ufshcd_memory_alloc(struct ufs_hba *hba)
1918{
1919 size_t utmrdl_size, utrdl_size, ucdl_size;
1920
1921 /* Allocate memory for UTP command descriptors */
1922 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
2953f850
SJ
1923 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
1924 ucdl_size,
1925 &hba->ucdl_dma_addr,
1926 GFP_KERNEL);
7a3e97b0
SY
1927
1928 /*
1929 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
1930 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
1931 * if it is aligned to PAGE_SIZE, then it will
1932 * be aligned to 128 bytes as well.
1933 */
1934 if (!hba->ucdl_base_addr ||
1935 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 1936 dev_err(hba->dev,
7a3e97b0
SY
1937 "Command Descriptor Memory allocation failed\n");
1938 goto out;
1939 }
1940
1941 /*
1942 * Allocate memory for UTP Transfer descriptors
1943 * UFSHCI requires 1024 byte alignment of UTRD
1944 */
1945 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
2953f850
SJ
1946 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
1947 utrdl_size,
1948 &hba->utrdl_dma_addr,
1949 GFP_KERNEL);
7a3e97b0
SY
1950 if (!hba->utrdl_base_addr ||
1951 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 1952 dev_err(hba->dev,
7a3e97b0
SY
1953 "Transfer Descriptor Memory allocation failed\n");
1954 goto out;
1955 }
1956
1957 /*
1958 * Allocate memory for UTP Task Management descriptors
1959 * UFSHCI requires 1024 byte alignment of UTMRD
1960 */
1961 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
2953f850
SJ
1962 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
1963 utmrdl_size,
1964 &hba->utmrdl_dma_addr,
1965 GFP_KERNEL);
7a3e97b0
SY
1966 if (!hba->utmrdl_base_addr ||
1967 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3b1d0580 1968 dev_err(hba->dev,
7a3e97b0
SY
1969 "Task Management Descriptor Memory allocation failed\n");
1970 goto out;
1971 }
1972
1973 /* Allocate memory for local reference block */
2953f850
SJ
1974 hba->lrb = devm_kzalloc(hba->dev,
1975 hba->nutrs * sizeof(struct ufshcd_lrb),
1976 GFP_KERNEL);
7a3e97b0 1977 if (!hba->lrb) {
3b1d0580 1978 dev_err(hba->dev, "LRB Memory allocation failed\n");
7a3e97b0
SY
1979 goto out;
1980 }
1981 return 0;
1982out:
7a3e97b0
SY
1983 return -ENOMEM;
1984}
1985
1986/**
1987 * ufshcd_host_memory_configure - configure local reference block with
1988 * memory offsets
1989 * @hba: per adapter instance
1990 *
1991 * Configure Host memory space
1992 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
1993 * address.
1994 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
1995 * and PRDT offset.
1996 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
1997 * into local reference block.
1998 */
1999static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2000{
2001 struct utp_transfer_cmd_desc *cmd_descp;
2002 struct utp_transfer_req_desc *utrdlp;
2003 dma_addr_t cmd_desc_dma_addr;
2004 dma_addr_t cmd_desc_element_addr;
2005 u16 response_offset;
2006 u16 prdt_offset;
2007 int cmd_desc_size;
2008 int i;
2009
2010 utrdlp = hba->utrdl_base_addr;
2011 cmd_descp = hba->ucdl_base_addr;
2012
2013 response_offset =
2014 offsetof(struct utp_transfer_cmd_desc, response_upiu);
2015 prdt_offset =
2016 offsetof(struct utp_transfer_cmd_desc, prd_table);
2017
2018 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
2019 cmd_desc_dma_addr = hba->ucdl_dma_addr;
2020
2021 for (i = 0; i < hba->nutrs; i++) {
2022 /* Configure UTRD with command descriptor base address */
2023 cmd_desc_element_addr =
2024 (cmd_desc_dma_addr + (cmd_desc_size * i));
2025 utrdlp[i].command_desc_base_addr_lo =
2026 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
2027 utrdlp[i].command_desc_base_addr_hi =
2028 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
2029
2030 /* Response UPIU and PRDT offsets are expressed in double words */
2031 utrdlp[i].response_upiu_offset =
2032 cpu_to_le16((response_offset >> 2));
2033 utrdlp[i].prd_table_offset =
2034 cpu_to_le16((prdt_offset >> 2));
2035 utrdlp[i].response_upiu_length =
3ca316c5 2036 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
7a3e97b0
SY
2037
2038 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
5a0b0cb9
SRT
2039 hba->lrb[i].ucd_req_ptr =
2040 (struct utp_upiu_req *)(cmd_descp + i);
7a3e97b0
SY
2041 hba->lrb[i].ucd_rsp_ptr =
2042 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
2043 hba->lrb[i].ucd_prdt_ptr =
2044 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
2045 }
2046}
2047
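/*
 * Illustrative sketch only, not part of the driver: the UTRD encodes the
 * response UPIU and PRDT offsets in double words (4-byte units), which is
 * why the byte offsets from offsetof() are shifted right by two above. A
 * response UPIU placed 512 bytes into the command descriptor, for example,
 * would be encoded as an offset of 128 double words.
 */
static inline u16 ufshcd_example_dword_offset(u16 byte_offset)
{
	/* byte_offset is assumed to be 4-byte aligned */
	return byte_offset >> 2;
}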
2048/**
2049 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
2050 * @hba: per adapter instance
2051 *
2052 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
2053 * in order to initialize the Unipro link startup procedure.
2054 * Once the Unipro links are up, the device connected to the controller
2055 * is detected.
2056 *
2057 * Returns 0 on success, non-zero value on failure
2058 */
2059static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2060{
6ccf44fe
SJ
2061 struct uic_command uic_cmd = {0};
2062 int ret;
7a3e97b0 2063
6ccf44fe 2064 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
7a3e97b0 2065
6ccf44fe
SJ
2066 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2067 if (ret)
2068 dev_err(hba->dev,
2069 "dme-link-startup: error code %d\n", ret);
2070 return ret;
7a3e97b0
SY
2071}
2072
cad2e03d
YG
2073static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
2074{
2075 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
2076 unsigned long min_sleep_time_us;
2077
2078 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
2079 return;
2080
2081 /*
2082 * last_dme_cmd_tstamp will be 0 only for 1st call to
2083 * this function
2084 */
2085 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
2086 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
2087 } else {
2088 unsigned long delta =
2089 (unsigned long) ktime_to_us(
2090 ktime_sub(ktime_get(),
2091 hba->last_dme_cmd_tstamp));
2092
2093 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
2094 min_sleep_time_us =
2095 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
2096 else
2097 return; /* no more delay required */
2098 }
2099
2100 /* allow sleep for extra 50us if needed */
2101 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
2102}
2103
12b4fdb4
SJ
2104/**
2105 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
2106 * @hba: per adapter instance
2107 * @attr_sel: uic command argument1
2108 * @attr_set: attribute set type as uic command argument2
2109 * @mib_val: setting value as uic command argument3
2110 * @peer: indicate whether peer or local
2111 *
2112 * Returns 0 on success, non-zero value on failure
2113 */
2114int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
2115 u8 attr_set, u32 mib_val, u8 peer)
2116{
2117 struct uic_command uic_cmd = {0};
2118 static const char *const action[] = {
2119 "dme-set",
2120 "dme-peer-set"
2121 };
2122 const char *set = action[!!peer];
2123 int ret;
2124
2125 uic_cmd.command = peer ?
2126 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
2127 uic_cmd.argument1 = attr_sel;
2128 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
2129 uic_cmd.argument3 = mib_val;
2130
2131 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2132 if (ret)
2133 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
2134 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
2135
2136 return ret;
2137}
2138EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
2139
2140/**
2141 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
2142 * @hba: per adapter instance
2143 * @attr_sel: uic command argument1
2144 * @mib_val: the value of the attribute as returned by the UIC command
2145 * @peer: indicate whether peer or local
2146 *
2147 * Returns 0 on success, non-zero value on failure
2148 */
2149int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
2150 u32 *mib_val, u8 peer)
2151{
2152 struct uic_command uic_cmd = {0};
2153 static const char *const action[] = {
2154 "dme-get",
2155 "dme-peer-get"
2156 };
2157 const char *get = action[!!peer];
2158 int ret;
2159
2160 uic_cmd.command = peer ?
2161 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
2162 uic_cmd.argument1 = attr_sel;
2163
2164 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2165 if (ret) {
2166 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
2167 get, UIC_GET_ATTR_ID(attr_sel), ret);
2168 goto out;
2169 }
2170
2171 if (mib_val)
2172 *mib_val = uic_cmd.argument3;
2173out:
2174 return ret;
2175}
2176EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
2177
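/*
 * Illustrative sketch only, not part of the driver: the ufshcd_dme_get() and
 * ufshcd_dme_peer_get() convenience wrappers (declared in ufshcd.h and used
 * later in this file) call the exported functions above to read a UniPro
 * attribute from the local and the peer (device) side respectively. (The
 * helper name below is hypothetical.)
 */
static int ufshcd_example_read_connected_lanes(struct ufs_hba *hba,
					       u32 *rx_lanes, u32 *tx_lanes)
{
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			     rx_lanes);
	if (ret)
		return ret;

	return ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			      tx_lanes);
}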
53b3d9c3 2178/**
57d104c1
SJ
2179 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
2180 * state) and waits for them to take effect.
2181 *
53b3d9c3 2182 * @hba: per adapter instance
57d104c1
SJ
2183 * @cmd: UIC command to execute
2184 *
2185 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
2186 * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
2187 * and device UniPro links, hence their final completion is indicated by
2188 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
2189 * addition to normal UIC command completion Status (UCCS). This function only
2190 * returns after the relevant status bits indicate the completion.
53b3d9c3
SJ
2191 *
2192 * Returns 0 on success, non-zero value on failure
2193 */
57d104c1 2194static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
53b3d9c3 2195{
57d104c1 2196 struct completion uic_async_done;
53b3d9c3
SJ
2197 unsigned long flags;
2198 u8 status;
2199 int ret;
2200
53b3d9c3 2201 mutex_lock(&hba->uic_cmd_mutex);
57d104c1 2202 init_completion(&uic_async_done);
cad2e03d 2203 ufshcd_add_delay_before_dme_cmd(hba);
53b3d9c3
SJ
2204
2205 spin_lock_irqsave(hba->host->host_lock, flags);
57d104c1
SJ
2206 hba->uic_async_done = &uic_async_done;
2207 ret = __ufshcd_send_uic_cmd(hba, cmd);
53b3d9c3 2208 spin_unlock_irqrestore(hba->host->host_lock, flags);
53b3d9c3
SJ
2209 if (ret) {
2210 dev_err(hba->dev,
57d104c1
SJ
2211 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2212 cmd->command, cmd->argument3, ret);
2213 goto out;
2214 }
2215 ret = ufshcd_wait_for_uic_cmd(hba, cmd);
2216 if (ret) {
2217 dev_err(hba->dev,
2218 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
2219 cmd->command, cmd->argument3, ret);
53b3d9c3
SJ
2220 goto out;
2221 }
2222
57d104c1 2223 if (!wait_for_completion_timeout(hba->uic_async_done,
53b3d9c3
SJ
2224 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2225 dev_err(hba->dev,
57d104c1
SJ
2226 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
2227 cmd->command, cmd->argument3);
53b3d9c3
SJ
2228 ret = -ETIMEDOUT;
2229 goto out;
2230 }
2231
2232 status = ufshcd_get_upmcrs(hba);
2233 if (status != PWR_LOCAL) {
2234 dev_err(hba->dev,
57d104c1
SJ
2235 "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
2236 cmd->command, status);
53b3d9c3
SJ
2237 ret = (status != PWR_OK) ? status : -1;
2238 }
2239out:
2240 spin_lock_irqsave(hba->host->host_lock, flags);
57d104c1 2241 hba->uic_async_done = NULL;
53b3d9c3
SJ
2242 spin_unlock_irqrestore(hba->host->host_lock, flags);
2243 mutex_unlock(&hba->uic_cmd_mutex);
1ab27c9c 2244
53b3d9c3
SJ
2245 return ret;
2246}
2247
57d104c1
SJ
2248/**
2249 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2250 * using DME_SET primitives.
2251 * @hba: per adapter instance
2252 * @mode: power mode value
2253 *
2254 * Returns 0 on success, non-zero value on failure
2255 */
2256static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
2257{
2258 struct uic_command uic_cmd = {0};
1ab27c9c 2259 int ret;
57d104c1
SJ
2260
2261 uic_cmd.command = UIC_CMD_DME_SET;
2262 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2263 uic_cmd.argument3 = mode;
1ab27c9c
ST
2264 ufshcd_hold(hba, false);
2265 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2266 ufshcd_release(hba);
57d104c1 2267
1ab27c9c 2268 return ret;
57d104c1
SJ
2269}
2270
2271static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2272{
2273 struct uic_command uic_cmd = {0};
2274
2275 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2276
2277 return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2278}
2279
2280static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2281{
2282 struct uic_command uic_cmd = {0};
2283 int ret;
2284
2285 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2286 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2287 if (ret) {
2288 ufshcd_set_link_off(hba);
2289 ret = ufshcd_host_reset_and_restore(hba);
2290 }
2291
2292 return ret;
2293}
2294
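/*
 * Illustrative sketch only, not part of the driver: a low power transition
 * pairs the two helpers above. A failed enter leaves the link active, while
 * a failed exit is already handled above by a full host reset and restore.
 * (The helper name below is hypothetical.)
 */
static int ufshcd_example_hibern8_cycle(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_uic_hibern8_enter(hba);
	if (ret)
		return ret;

	/* ... the link stays in hibernate while the host is idle ... */

	return ufshcd_uic_hibern8_exit(hba);
}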
5064636c
YG
2295 /**
2296 * ufshcd_init_pwr_info - setting the POR (power on reset)
2297 * values in hba power info
2298 * @hba: per-adapter instance
2299 */
2300static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2301{
2302 hba->pwr_info.gear_rx = UFS_PWM_G1;
2303 hba->pwr_info.gear_tx = UFS_PWM_G1;
2304 hba->pwr_info.lane_rx = 1;
2305 hba->pwr_info.lane_tx = 1;
2306 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2307 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2308 hba->pwr_info.hs_rate = 0;
2309}
2310
d3e89bac 2311/**
7eb584db
DR
2312 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2313 * @hba: per-adapter instance
d3e89bac 2314 */
7eb584db 2315static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
d3e89bac 2316{
7eb584db
DR
2317 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2318
2319 if (hba->max_pwr_info.is_valid)
2320 return 0;
2321
2322 pwr_info->pwr_tx = FASTAUTO_MODE;
2323 pwr_info->pwr_rx = FASTAUTO_MODE;
2324 pwr_info->hs_rate = PA_HS_MODE_B;
d3e89bac
SJ
2325
2326 /* Get the connected lane count */
7eb584db
DR
2327 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
2328 &pwr_info->lane_rx);
2329 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
2330 &pwr_info->lane_tx);
2331
2332 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2333 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2334 __func__,
2335 pwr_info->lane_rx,
2336 pwr_info->lane_tx);
2337 return -EINVAL;
2338 }
d3e89bac
SJ
2339
2340 /*
2341 * First, get the maximum gears of HS speed.
2342 * If a zero value, it means there is no HSGEAR capability.
2343 * Then, get the maximum gears of PWM speed.
2344 */
7eb584db
DR
2345 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
2346 if (!pwr_info->gear_rx) {
2347 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
2348 &pwr_info->gear_rx);
2349 if (!pwr_info->gear_rx) {
2350 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2351 __func__, pwr_info->gear_rx);
2352 return -EINVAL;
2353 }
2354 pwr_info->pwr_rx = SLOWAUTO_MODE;
d3e89bac
SJ
2355 }
2356
7eb584db
DR
2357 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
2358 &pwr_info->gear_tx);
2359 if (!pwr_info->gear_tx) {
d3e89bac 2360 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
7eb584db
DR
2361 &pwr_info->gear_tx);
2362 if (!pwr_info->gear_tx) {
2363 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2364 __func__, pwr_info->gear_tx);
2365 return -EINVAL;
2366 }
2367 pwr_info->pwr_tx = SLOWAUTO_MODE;
2368 }
2369
2370 hba->max_pwr_info.is_valid = true;
2371 return 0;
2372}
2373
2374static int ufshcd_change_power_mode(struct ufs_hba *hba,
2375 struct ufs_pa_layer_attr *pwr_mode)
2376{
2377 int ret;
2378
2379 /* if already configured to the requested pwr_mode */
2380 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2381 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2382 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2383 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2384 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2385 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2386 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2387 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2388 return 0;
d3e89bac
SJ
2389 }
2390
2391 /*
2392 * Configure attributes for power mode change with below.
2393 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
2394 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2395 * - PA_HSSERIES
2396 */
7eb584db
DR
2397 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2398 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2399 pwr_mode->lane_rx);
2400 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2401 pwr_mode->pwr_rx == FAST_MODE)
d3e89bac 2402 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
7eb584db
DR
2403 else
2404 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
d3e89bac 2405
7eb584db
DR
2406 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2407 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2408 pwr_mode->lane_tx);
2409 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2410 pwr_mode->pwr_tx == FAST_MODE)
d3e89bac 2411 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
7eb584db
DR
2412 else
2413 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
d3e89bac 2414
7eb584db
DR
2415 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2416 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2417 pwr_mode->pwr_rx == FAST_MODE ||
2418 pwr_mode->pwr_tx == FAST_MODE)
2419 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2420 pwr_mode->hs_rate);
d3e89bac 2421
7eb584db
DR
2422 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2423 | pwr_mode->pwr_tx);
2424
2425 if (ret) {
d3e89bac 2426 dev_err(hba->dev,
7eb584db
DR
2427 "%s: power mode change failed %d\n", __func__, ret);
2428 } else {
2429 if (hba->vops && hba->vops->pwr_change_notify)
2430 hba->vops->pwr_change_notify(hba,
2431 POST_CHANGE, NULL, pwr_mode);
2432
2433 memcpy(&hba->pwr_info, pwr_mode,
2434 sizeof(struct ufs_pa_layer_attr));
2435 }
2436
2437 return ret;
2438}
2439
2440/**
2441 * ufshcd_config_pwr_mode - configure a new power mode
2442 * @hba: per-adapter instance
2443 * @desired_pwr_mode: desired power configuration
2444 */
2445static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2446 struct ufs_pa_layer_attr *desired_pwr_mode)
2447{
2448 struct ufs_pa_layer_attr final_params = { 0 };
2449 int ret;
2450
2451 if (hba->vops && hba->vops->pwr_change_notify)
2452 hba->vops->pwr_change_notify(hba,
2453 PRE_CHANGE, desired_pwr_mode, &final_params);
2454 else
2455 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2456
2457 ret = ufshcd_change_power_mode(hba, &final_params);
d3e89bac
SJ
2458
2459 return ret;
2460}
2461
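/*
 * Illustrative sketch only, not part of the driver: after link startup, a
 * probe path can read the maximum capabilities negotiated with the device
 * and then program them, exercising both helpers above. (The helper name
 * below is hypothetical.)
 */
static int ufshcd_example_switch_to_max_pwr_mode(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_get_max_pwr_mode(hba);
	if (ret)
		return ret;

	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}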
68078d5c
DR
2462/**
2463 * ufshcd_complete_dev_init() - checks device readiness
2464 * @hba: per-adapter instance
2465 *
2466 * Set fDeviceInit flag and poll until device toggles it.
2467 */
2468static int ufshcd_complete_dev_init(struct ufs_hba *hba)
2469{
2470 int i, retries, err = 0;
2471 bool flag_res = 1;
2472
2473 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2474 /* Set the fDeviceInit flag */
2475 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2476 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
2477 if (!err || err == -ETIMEDOUT)
2478 break;
2479 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2480 }
2481 if (err) {
2482 dev_err(hba->dev,
2483 "%s setting fDeviceInit flag failed with error %d\n",
2484 __func__, err);
2485 goto out;
2486 }
2487
2488 /* poll for max. 100 iterations for fDeviceInit flag to clear */
2489 for (i = 0; i < 100 && !err && flag_res; i++) {
2490 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2491 err = ufshcd_query_flag(hba,
2492 UPIU_QUERY_OPCODE_READ_FLAG,
2493 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
2494 if (!err || err == -ETIMEDOUT)
2495 break;
2496 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
2497 err);
2498 }
2499 }
2500 if (err)
2501 dev_err(hba->dev,
2502 "%s reading fDeviceInit flag failed with error %d\n",
2503 __func__, err);
2504 else if (flag_res)
2505 dev_err(hba->dev,
2506 "%s fDeviceInit was not cleared by the device\n",
2507 __func__);
2508
2509out:
2510 return err;
2511}
2512
7a3e97b0
SY
2513/**
2514 * ufshcd_make_hba_operational - Make UFS controller operational
2515 * @hba: per adapter instance
2516 *
2517 * To bring UFS host controller to operational state,
5c0c28a8
SRT
2518 * 1. Enable required interrupts
2519 * 2. Configure interrupt aggregation
2520 * 3. Program UTRL and UTMRL base addresses
2521 * 4. Configure run-stop-registers
7a3e97b0
SY
2522 *
2523 * Returns 0 on success, non-zero value on failure
2524 */
2525static int ufshcd_make_hba_operational(struct ufs_hba *hba)
2526{
2527 int err = 0;
2528 u32 reg;
2529
6ccf44fe
SJ
2530 /* Enable required interrupts */
2531 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
2532
2533 /* Configure interrupt aggregation */
b852190e
YG
2534 if (ufshcd_is_intr_aggr_allowed(hba))
2535 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
2536 else
2537 ufshcd_disable_intr_aggr(hba);
6ccf44fe
SJ
2538
2539 /* Configure UTRL and UTMRL base address registers */
2540 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
2541 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
2542 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
2543 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
2544 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
2545 REG_UTP_TASK_REQ_LIST_BASE_L);
2546 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
2547 REG_UTP_TASK_REQ_LIST_BASE_H);
2548
7a3e97b0
SY
2549 /*
2550 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
2551 * DEI, HEI bits must be 0
2552 */
5c0c28a8 2553 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
7a3e97b0
SY
2554 if (!(ufshcd_get_lists_status(reg))) {
2555 ufshcd_enable_run_stop_reg(hba);
2556 } else {
3b1d0580 2557 dev_err(hba->dev,
7a3e97b0
SY
2558 "Host controller not ready to process requests");
2559 err = -EIO;
2560 goto out;
2561 }
2562
7a3e97b0
SY
2563out:
2564 return err;
2565}
2566
2567/**
2568 * ufshcd_hba_enable - initialize the controller
2569 * @hba: per adapter instance
2570 *
2571 * The controller resets itself and controller firmware initialization
2572 * sequence kicks off. When controller is ready it will set
2573 * the Host Controller Enable bit to 1.
2574 *
2575 * Returns 0 on success, non-zero value on failure
2576 */
2577static int ufshcd_hba_enable(struct ufs_hba *hba)
2578{
2579 int retry;
2580
2581 /*
2582 * msleep of 1 and 5 used in this function might result in msleep(20),
2583 * but it was necessary to send the UFS FPGA to reset mode during
2584 * development and testing of this driver. msleep can be changed to
2585 * mdelay and retry count can be reduced based on the controller.
2586 */
2587 if (!ufshcd_is_hba_active(hba)) {
2588
2589 /* change controller state to "reset state" */
2590 ufshcd_hba_stop(hba);
2591
2592 /*
2593 * This delay is based on the testing done with UFS host
2594 * controller FPGA. The delay can be changed based on the
2595 * host controller used.
2596 */
2597 msleep(5);
2598 }
2599
57d104c1
SJ
2600 /* UniPro link is disabled at this point */
2601 ufshcd_set_link_off(hba);
2602
5c0c28a8
SRT
2603 if (hba->vops && hba->vops->hce_enable_notify)
2604 hba->vops->hce_enable_notify(hba, PRE_CHANGE);
2605
7a3e97b0
SY
2606 /* start controller initialization sequence */
2607 ufshcd_hba_start(hba);
2608
2609 /*
2610 * To initialize a UFS host controller HCE bit must be set to 1.
2611 * During initialization the HCE bit value changes from 1->0->1.
2612 * When the host controller completes initialization sequence
2613 * it sets the value of HCE bit to 1. The same HCE bit is read back
2614 * to check if the controller has completed initialization sequence.
2615 * So without this delay, the value HCE = 1 set by the previous
2616 * instruction might be read back.
2617 * This delay can be changed based on the controller.
2618 */
2619 msleep(1);
2620
2621 /* wait for the host controller to complete initialization */
2622 retry = 10;
2623 while (ufshcd_is_hba_active(hba)) {
2624 if (retry) {
2625 retry--;
2626 } else {
3b1d0580 2627 dev_err(hba->dev,
7a3e97b0
SY
2628 "Controller enable failed\n");
2629 return -EIO;
2630 }
2631 msleep(5);
2632 }
5c0c28a8 2633
1d337ec2 2634 /* enable UIC related interrupts */
57d104c1 2635 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
1d337ec2 2636
5c0c28a8
SRT
2637 if (hba->vops && hba->vops->hce_enable_notify)
2638 hba->vops->hce_enable_notify(hba, POST_CHANGE);
2639
7a3e97b0
SY
2640 return 0;
2641}
2642
2643/**
6ccf44fe 2644 * ufshcd_link_startup - Initialize unipro link startup
7a3e97b0
SY
2645 * @hba: per adapter instance
2646 *
6ccf44fe 2647 * Returns 0 for success, non-zero in case of failure
7a3e97b0 2648 */
6ccf44fe 2649static int ufshcd_link_startup(struct ufs_hba *hba)
7a3e97b0 2650{
6ccf44fe 2651 int ret;
1d337ec2 2652 int retries = DME_LINKSTARTUP_RETRIES;
7a3e97b0 2653
1d337ec2
SRT
2654 do {
2655 if (hba->vops && hba->vops->link_startup_notify)
2656 hba->vops->link_startup_notify(hba, PRE_CHANGE);
6ccf44fe 2657
1d337ec2 2658 ret = ufshcd_dme_link_startup(hba);
5c0c28a8 2659
1d337ec2
SRT
2660 /* check if device is detected by inter-connect layer */
2661 if (!ret && !ufshcd_is_device_present(hba)) {
2662 dev_err(hba->dev, "%s: Device not present\n", __func__);
2663 ret = -ENXIO;
2664 goto out;
2665 }
6ccf44fe 2666
1d337ec2
SRT
2667 /*
2668 * DME link lost indication is only received when link is up,
2669 * but we can't be sure if the link is up until link startup
2670 * succeeds. So reset the local Uni-Pro and try again.
2671 */
2672 if (ret && ufshcd_hba_enable(hba))
2673 goto out;
2674 } while (ret && retries--);
2675
2676 if (ret)
2677 /* failed to get the link up... retire */
5c0c28a8 2678 goto out;
5c0c28a8
SRT
2679
2680 /* Include any host controller configuration via UIC commands */
2681 if (hba->vops && hba->vops->link_startup_notify) {
2682 ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
2683 if (ret)
2684 goto out;
2685 }
7a3e97b0 2686
5c0c28a8 2687 ret = ufshcd_make_hba_operational(hba);
6ccf44fe
SJ
2688out:
2689 if (ret)
2690 dev_err(hba->dev, "link startup failed %d\n", ret);
2691 return ret;
7a3e97b0
SY
2692}
2693
5a0b0cb9
SRT
2694/**
2695 * ufshcd_verify_dev_init() - Verify device initialization
2696 * @hba: per-adapter instance
2697 *
2698 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
2699 * device Transport Protocol (UTP) layer is ready after a reset.
2700 * If the UTP layer at the device side is not initialized, it may
2701 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
2702 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
2703 */
2704static int ufshcd_verify_dev_init(struct ufs_hba *hba)
2705{
2706 int err = 0;
2707 int retries;
2708
1ab27c9c 2709 ufshcd_hold(hba, false);
5a0b0cb9
SRT
2710 mutex_lock(&hba->dev_cmd.lock);
2711 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
2712 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
2713 NOP_OUT_TIMEOUT);
2714
2715 if (!err || err == -ETIMEDOUT)
2716 break;
2717
2718 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
2719 }
2720 mutex_unlock(&hba->dev_cmd.lock);
1ab27c9c 2721 ufshcd_release(hba);
5a0b0cb9
SRT
2722
2723 if (err)
2724 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
2725 return err;
2726}
2727
0ce147d4
SJ
2728/**
2729 * ufshcd_set_queue_depth - set lun queue depth
2730 * @sdev: pointer to SCSI device
2731 *
2732 * Read bLUQueueDepth value and activate scsi tagged command
2733 * queueing. For WLUN, queue depth is set to 1. For best-effort
2734 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
2735 * value that the host can queue.
2736 */
2737static void ufshcd_set_queue_depth(struct scsi_device *sdev)
2738{
2739 int ret = 0;
2740 u8 lun_qdepth;
2741 struct ufs_hba *hba;
2742
2743 hba = shost_priv(sdev->host);
2744
2745 lun_qdepth = hba->nutrs;
2746 ret = ufshcd_read_unit_desc_param(hba,
2747 ufshcd_scsi_to_upiu_lun(sdev->lun),
2748 UNIT_DESC_PARAM_LU_Q_DEPTH,
2749 &lun_qdepth,
2750 sizeof(lun_qdepth));
2751
2752 /* Some WLUNs don't support unit descriptors */
2753 if (ret == -EOPNOTSUPP)
2754 lun_qdepth = 1;
2755 else if (!lun_qdepth)
2756 /* eventually, we can figure out the real queue depth */
2757 lun_qdepth = hba->nutrs;
2758 else
2759 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
2760
2761 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2762 __func__, lun_qdepth);
db5ed4df 2763 scsi_change_queue_depth(sdev, lun_qdepth);
0ce147d4
SJ
2764}
2765
57d104c1
SJ
2766/*
2767 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
2768 * @hba: per-adapter instance
2769 * @lun: UFS device lun id
2770 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
2771 *
2772 * Returns 0 in case of success and the write protect status is returned in
2773 * the @b_lu_write_protect parameter.
2774 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
2775 * Returns -EINVAL in case of invalid parameters passed to this function.
2776 */
2777static int ufshcd_get_lu_wp(struct ufs_hba *hba,
2778 u8 lun,
2779 u8 *b_lu_write_protect)
2780{
2781 int ret;
2782
2783 if (!b_lu_write_protect)
2784 ret = -EINVAL;
2785 /*
2786 * According to UFS device spec, RPMB LU can't be write
2787 * protected so skip reading bLUWriteProtect parameter for
2788 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
2789 */
2790 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
2791 ret = -ENOTSUPP;
2792 else
2793 ret = ufshcd_read_unit_desc_param(hba,
2794 lun,
2795 UNIT_DESC_PARAM_LU_WR_PROTECT,
2796 b_lu_write_protect,
2797 sizeof(*b_lu_write_protect));
2798 return ret;
2799}
2800
2801/**
2802 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
2803 * status
2804 * @hba: per-adapter instance
2805 * @sdev: pointer to SCSI device
2806 *
2807 */
2808static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
2809 struct scsi_device *sdev)
2810{
2811 if (hba->dev_info.f_power_on_wp_en &&
2812 !hba->dev_info.is_lu_power_on_wp) {
2813 u8 b_lu_write_protect;
2814
2815 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
2816 &b_lu_write_protect) &&
2817 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
2818 hba->dev_info.is_lu_power_on_wp = true;
2819 }
2820}
2821
7a3e97b0
SY
2822/**
2823 * ufshcd_slave_alloc - handle initial SCSI device configurations
2824 * @sdev: pointer to SCSI device
2825 *
2826 * Returns success
2827 */
2828static int ufshcd_slave_alloc(struct scsi_device *sdev)
2829{
2830 struct ufs_hba *hba;
2831
2832 hba = shost_priv(sdev->host);
7a3e97b0
SY
2833
2834 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
2835 sdev->use_10_for_ms = 1;
7a3e97b0 2836
e8e7f271
SRT
2837 /* allow SCSI layer to restart the device in case of errors */
2838 sdev->allow_restart = 1;
4264fd61 2839
b2a6c522
SRT
2840 /* REPORT SUPPORTED OPERATION CODES is not supported */
2841 sdev->no_report_opcodes = 1;
2842
e8e7f271 2843
0ce147d4 2844 ufshcd_set_queue_depth(sdev);
4264fd61 2845
57d104c1
SJ
2846 ufshcd_get_lu_power_on_wp_status(hba, sdev);
2847
7a3e97b0
SY
2848 return 0;
2849}
2850
4264fd61
SRT
2851/**
2852 * ufshcd_change_queue_depth - change queue depth
2853 * @sdev: pointer to SCSI device
2854 * @depth: required depth to set
4264fd61 2855 *
db5ed4df 2856 * Change queue depth and make sure the max. limits are not crossed.
4264fd61 2857 */
db5ed4df 2858static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4264fd61
SRT
2859{
2860 struct ufs_hba *hba = shost_priv(sdev->host);
2861
2862 if (depth > hba->nutrs)
2863 depth = hba->nutrs;
db5ed4df 2864 return scsi_change_queue_depth(sdev, depth);
4264fd61
SRT
2865}
2866
eeda4749
AM
2867/**
2868 * ufshcd_slave_configure - adjust SCSI device configurations
2869 * @sdev: pointer to SCSI device
2870 */
2871static int ufshcd_slave_configure(struct scsi_device *sdev)
2872{
2873 struct request_queue *q = sdev->request_queue;
2874
2875 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
2876 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
2877
2878 return 0;
2879}
2880
7a3e97b0
SY
2881/**
2882 * ufshcd_slave_destroy - remove SCSI device configurations
2883 * @sdev: pointer to SCSI device
2884 */
2885static void ufshcd_slave_destroy(struct scsi_device *sdev)
2886{
2887 struct ufs_hba *hba;
2888
2889 hba = shost_priv(sdev->host);
0ce147d4 2890 /* Drop the reference as it won't be needed anymore */
7c48bfd0
AM
2891 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
2892 unsigned long flags;
2893
2894 spin_lock_irqsave(hba->host->host_lock, flags);
0ce147d4 2895 hba->sdev_ufs_device = NULL;
7c48bfd0
AM
2896 spin_unlock_irqrestore(hba->host->host_lock, flags);
2897 }
7a3e97b0
SY
2898}
2899
2900/**
2901 * ufshcd_task_req_compl - handle task management request completion
2902 * @hba: per adapter instance
2903 * @index: index of the completed request
e2933132 2904 * @resp: task management service response
7a3e97b0 2905 *
e2933132 2906 * Returns non-zero value on error, zero on success
7a3e97b0 2907 */
e2933132 2908static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
7a3e97b0
SY
2909{
2910 struct utp_task_req_desc *task_req_descp;
2911 struct utp_upiu_task_rsp *task_rsp_upiup;
2912 unsigned long flags;
2913 int ocs_value;
2914 int task_result;
2915
2916 spin_lock_irqsave(hba->host->host_lock, flags);
2917
2918 /* Clear completed tasks from outstanding_tasks */
2919 __clear_bit(index, &hba->outstanding_tasks);
2920
2921 task_req_descp = hba->utmrdl_base_addr;
2922 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
2923
2924 if (ocs_value == OCS_SUCCESS) {
2925 task_rsp_upiup = (struct utp_upiu_task_rsp *)
2926 task_req_descp[index].task_rsp_upiu;
2927 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
2928 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
e2933132
SRT
2929 if (resp)
2930 *resp = (u8)task_result;
7a3e97b0 2931 } else {
e2933132
SRT
2932 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
2933 __func__, ocs_value);
7a3e97b0
SY
2934 }
2935 spin_unlock_irqrestore(hba->host->host_lock, flags);
e2933132
SRT
2936
2937 return ocs_value;
7a3e97b0
SY
2938}
2939
7a3e97b0
SY
2940/**
2941 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
2942 * @lrb: pointer to local reference block of completed command
2943 * @scsi_status: SCSI command status
2944 *
2945 * Returns value based on SCSI command status
2946 */
2947static inline int
2948ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
2949{
2950 int result = 0;
2951
2952 switch (scsi_status) {
7a3e97b0 2953 case SAM_STAT_CHECK_CONDITION:
1c2623c5
SJ
2954 ufshcd_copy_sense_data(lrbp);
2955 case SAM_STAT_GOOD:
7a3e97b0
SY
2956 result |= DID_OK << 16 |
2957 COMMAND_COMPLETE << 8 |
1c2623c5 2958 scsi_status;
7a3e97b0
SY
2959 break;
2960 case SAM_STAT_TASK_SET_FULL:
1c2623c5 2961 case SAM_STAT_BUSY:
7a3e97b0 2962 case SAM_STAT_TASK_ABORTED:
1c2623c5
SJ
2963 ufshcd_copy_sense_data(lrbp);
2964 result |= scsi_status;
7a3e97b0
SY
2965 break;
2966 default:
2967 result |= DID_ERROR << 16;
2968 break;
2969 } /* end of switch */
2970
2971 return result;
2972}
2973
2974/**
2975 * ufshcd_transfer_rsp_status - Get overall status of the response
2976 * @hba: per adapter instance
2977 * @lrb: pointer to local reference block of completed command
2978 *
2979 * Returns result of the command to notify SCSI midlayer
2980 */
2981static inline int
2982ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2983{
2984 int result = 0;
2985 int scsi_status;
2986 int ocs;
2987
2988 /* overall command status of utrd */
2989 ocs = ufshcd_get_tr_ocs(lrbp);
2990
2991 switch (ocs) {
2992 case OCS_SUCCESS:
5a0b0cb9 2993 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
7a3e97b0 2994
5a0b0cb9
SRT
2995 switch (result) {
2996 case UPIU_TRANSACTION_RESPONSE:
2997 /*
2998 * get the response UPIU result to extract
2999 * the SCSI command status
3000 */
3001 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
3002
3003 /*
3004 * get the result based on SCSI status response
3005 * to notify the SCSI midlayer of the command status
3006 */
3007 scsi_status = result & MASK_SCSI_STATUS;
3008 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
66ec6d59
SRT
3009
3010 if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
3011 schedule_work(&hba->eeh_work);
5a0b0cb9
SRT
3012 break;
3013 case UPIU_TRANSACTION_REJECT_UPIU:
3014 /* TODO: handle Reject UPIU Response */
3015 result = DID_ERROR << 16;
3b1d0580 3016 dev_err(hba->dev,
5a0b0cb9
SRT
3017 "Reject UPIU not fully implemented\n");
3018 break;
3019 default:
3020 result = DID_ERROR << 16;
3021 dev_err(hba->dev,
3022 "Unexpected request response code = %x\n",
3023 result);
7a3e97b0
SY
3024 break;
3025 }
7a3e97b0
SY
3026 break;
3027 case OCS_ABORTED:
3028 result |= DID_ABORT << 16;
3029 break;
e8e7f271
SRT
3030 case OCS_INVALID_COMMAND_STATUS:
3031 result |= DID_REQUEUE << 16;
3032 break;
7a3e97b0
SY
3033 case OCS_INVALID_CMD_TABLE_ATTR:
3034 case OCS_INVALID_PRDT_ATTR:
3035 case OCS_MISMATCH_DATA_BUF_SIZE:
3036 case OCS_MISMATCH_RESP_UPIU_SIZE:
3037 case OCS_PEER_COMM_FAILURE:
3038 case OCS_FATAL_ERROR:
3039 default:
3040 result |= DID_ERROR << 16;
3b1d0580 3041 dev_err(hba->dev,
7a3e97b0
SY
3042 "OCS error from controller = %x\n", ocs);
3043 break;
3044 } /* end of switch */
3045
3046 return result;
3047}
3048
6ccf44fe
SJ
3049/**
3050 * ufshcd_uic_cmd_compl - handle completion of uic command
3051 * @hba: per adapter instance
53b3d9c3 3052 * @intr_status: interrupt status generated by the controller
6ccf44fe 3053 */
53b3d9c3 3054static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 3055{
53b3d9c3 3056 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
3057 hba->active_uic_cmd->argument2 |=
3058 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
3059 hba->active_uic_cmd->argument3 =
3060 ufshcd_get_dme_attr_val(hba);
6ccf44fe
SJ
3061 complete(&hba->active_uic_cmd->done);
3062 }
53b3d9c3 3063
57d104c1
SJ
3064 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
3065 complete(hba->uic_async_done);
6ccf44fe
SJ
3066}
3067
7a3e97b0
SY
3068/**
3069 * ufshcd_transfer_req_compl - handle SCSI and query command completion
3070 * @hba: per adapter instance
3071 */
3072static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
3073{
5a0b0cb9
SRT
3074 struct ufshcd_lrb *lrbp;
3075 struct scsi_cmnd *cmd;
7a3e97b0
SY
3076 unsigned long completed_reqs;
3077 u32 tr_doorbell;
3078 int result;
3079 int index;
e9d501b1
DR
3080
3081 /* Resetting interrupt aggregation counters first and reading the
3082 * DOOR_BELL afterward allows us to handle all the completed requests.
3083 * To prevent starvation of other interrupts, the DB is read once
3084 * after reset. The downside of this approach is the possibility of a
3085 * false interrupt if the device completes another request after resetting
3086 * aggregation and before reading the DB.
3087 */
b852190e
YG
3088 if (ufshcd_is_intr_aggr_allowed(hba))
3089 ufshcd_reset_intr_aggr(hba);
7a3e97b0 3090
b873a275 3091 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7a3e97b0
SY
3092 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3093
e9d501b1
DR
3094 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
3095 lrbp = &hba->lrb[index];
3096 cmd = lrbp->cmd;
3097 if (cmd) {
3098 result = ufshcd_transfer_rsp_status(hba, lrbp);
3099 scsi_dma_unmap(cmd);
3100 cmd->result = result;
3101 /* Mark completed command as NULL in LRB */
3102 lrbp->cmd = NULL;
3103 clear_bit_unlock(index, &hba->lrb_in_use);
3104 /* Do not touch lrbp after scsi done */
3105 cmd->scsi_done(cmd);
1ab27c9c 3106 __ufshcd_release(hba);
e9d501b1
DR
3107 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
3108 if (hba->dev_cmd.complete)
3109 complete(hba->dev_cmd.complete);
3110 }
3111 }
7a3e97b0
SY
3112
3113 /* clear corresponding bits of completed commands */
3114 hba->outstanding_reqs ^= completed_reqs;
3115
856b3483
ST
3116 ufshcd_clk_scaling_update_busy(hba);
3117
5a0b0cb9
SRT
3118 /* we might have free'd some tags above */
3119 wake_up(&hba->dev_cmd.tag_wq);
7a3e97b0
SY
3120}
3121
66ec6d59
SRT
3122/**
3123 * ufshcd_disable_ee - disable exception event
3124 * @hba: per-adapter instance
3125 * @mask: exception event to disable
3126 *
3127 * Disables exception event in the device so that the EVENT_ALERT
3128 * bit is not set.
3129 *
3130 * Returns zero on success, non-zero error value on failure.
3131 */
3132static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
3133{
3134 int err = 0;
3135 u32 val;
3136
3137 if (!(hba->ee_ctrl_mask & mask))
3138 goto out;
3139
3140 val = hba->ee_ctrl_mask & ~mask;
3141 val &= 0xFFFF; /* 2 bytes */
3142 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3143 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3144 if (!err)
3145 hba->ee_ctrl_mask &= ~mask;
3146out:
3147 return err;
3148}
3149
3150/**
3151 * ufshcd_enable_ee - enable exception event
3152 * @hba: per-adapter instance
3153 * @mask: exception event to enable
3154 *
3155 * Enable corresponding exception event in the device to allow
3156 * device to alert host in critical scenarios.
3157 *
3158 * Returns zero on success, non-zero error value on failure.
3159 */
3160static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
3161{
3162 int err = 0;
3163 u32 val;
3164
3165 if (hba->ee_ctrl_mask & mask)
3166 goto out;
3167
3168 val = hba->ee_ctrl_mask | mask;
3169 val &= 0xFFFF; /* 2 bytes */
3170 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
3171 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
3172 if (!err)
3173 hba->ee_ctrl_mask |= mask;
3174out:
3175 return err;
3176}
3177
3178/**
3179 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
3180 * @hba: per-adapter instance
3181 *
3182 * Allow device to manage background operations on its own. Enabling
3183 * this might lead to inconsistent latencies during normal data transfers
3184 * as the device is allowed to manage its own way of handling background
3185 * operations.
3186 *
3187 * Returns zero on success, non-zero on failure.
3188 */
3189static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3190{
3191 int err = 0;
3192
3193 if (hba->auto_bkops_enabled)
3194 goto out;
3195
3196 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3197 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3198 if (err) {
3199 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
3200 __func__, err);
3201 goto out;
3202 }
3203
3204 hba->auto_bkops_enabled = true;
3205
3206 /* No need of URGENT_BKOPS exception from the device */
3207 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3208 if (err)
3209 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
3210 __func__, err);
3211out:
3212 return err;
3213}
3214
3215/**
3216 * ufshcd_disable_auto_bkops - block device in doing background operations
3217 * @hba: per-adapter instance
3218 *
3219 * Disabling background operations improves command response latency but
3220 * has the drawback of the device moving into a critical state where it is
3221 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
3222 * host is idle so that BKOPS are managed effectively without any negative
3223 * impacts.
3224 *
3225 * Returns zero on success, non-zero on failure.
3226 */
3227static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3228{
3229 int err = 0;
3230
3231 if (!hba->auto_bkops_enabled)
3232 goto out;
3233
3234 /*
3235 * If host assisted BKOPs is to be enabled, make sure
3236 * urgent bkops exception is allowed.
3237 */
3238 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
3239 if (err) {
3240 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
3241 __func__, err);
3242 goto out;
3243 }
3244
3245 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
3246 QUERY_FLAG_IDN_BKOPS_EN, NULL);
3247 if (err) {
3248 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
3249 __func__, err);
3250 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
3251 goto out;
3252 }
3253
3254 hba->auto_bkops_enabled = false;
3255out:
3256 return err;
3257}
3258
3259/**
3260 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
3261 * @hba: per adapter instance
3262 *
3263 * After a device reset the device may toggle the BKOPS_EN flag
3264 * to default value. The s/w tracking variables should be updated
3265 * as well. Do this by forcing enable of auto bkops.
3266 */
3267static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3268{
3269 hba->auto_bkops_enabled = false;
3270 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
3271 ufshcd_enable_auto_bkops(hba);
3272}
3273
3274static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
3275{
3276 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3277 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
3278}
3279
3280/**
57d104c1 3281 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
66ec6d59 3282 * @hba: per-adapter instance
57d104c1 3283 * @status: bkops_status value
66ec6d59 3284 *
57d104c1
SJ
3285 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
3286 * flag in the device to permit background operations if the device
3287 * bkops_status is greater than or equal to the "status" argument passed to
3288 * this function; disable it otherwise.
3289 *
3290 * Returns 0 for success, non-zero in case of failure.
3291 *
3292 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3293 * to know whether auto bkops is enabled or disabled after this function
3294 * returns control to it.
66ec6d59 3295 */
57d104c1
SJ
3296static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3297 enum bkops_status status)
66ec6d59
SRT
3298{
3299 int err;
57d104c1 3300 u32 curr_status = 0;
66ec6d59 3301
57d104c1 3302 err = ufshcd_get_bkops_status(hba, &curr_status);
66ec6d59
SRT
3303 if (err) {
3304 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
3305 __func__, err);
3306 goto out;
57d104c1
SJ
3307 } else if (curr_status > BKOPS_STATUS_MAX) {
3308 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3309 __func__, curr_status);
3310 err = -EINVAL;
3311 goto out;
66ec6d59
SRT
3312 }
3313
57d104c1 3314 if (curr_status >= status)
66ec6d59 3315 err = ufshcd_enable_auto_bkops(hba);
57d104c1
SJ
3316 else
3317 err = ufshcd_disable_auto_bkops(hba);
66ec6d59
SRT
3318out:
3319 return err;
3320}
3321
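/*
 * Illustrative sketch only, not part of the driver: a suspend path can call
 * ufshcd_bkops_ctrl() with a lower threshold such as BKOPS_STATUS_NON_CRITICAL
 * (assumed to be defined in ufs.h) so that background operations are enabled
 * before the link is powered down whenever the device reports any outstanding
 * work. (The helper name below is hypothetical.)
 */
static int ufshcd_example_bkops_before_suspend(struct ufs_hba *hba)
{
	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
}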
57d104c1
SJ
3322/**
3323 * ufshcd_urgent_bkops - handle urgent bkops exception event
3324 * @hba: per-adapter instance
3325 *
3326 * Enable fBackgroundOpsEn flag in the device to permit background
3327 * operations.
3328 *
3329 * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is not
3330 * enabled, and a negative error value for any other failure.
3331 */
3332static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3333{
3334 return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
3335}
3336
66ec6d59
SRT
3337static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
3338{
3339 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3340 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
3341}
3342
3343/**
3344 * ufshcd_exception_event_handler - handle exceptions raised by device
3345 * @work: pointer to work data
3346 *
3347 * Read bExceptionEventStatus attribute from the device and handle the
3348 * exception event accordingly.
3349 */
3350static void ufshcd_exception_event_handler(struct work_struct *work)
3351{
3352 struct ufs_hba *hba;
3353 int err;
3354 u32 status = 0;
3355 hba = container_of(work, struct ufs_hba, eeh_work);
3356
62694735 3357 pm_runtime_get_sync(hba->dev);
66ec6d59
SRT
3358 err = ufshcd_get_ee_status(hba, &status);
3359 if (err) {
3360 dev_err(hba->dev, "%s: failed to get exception status %d\n",
3361 __func__, err);
3362 goto out;
3363 }
3364
3365 status &= hba->ee_ctrl_mask;
3366 if (status & MASK_EE_URGENT_BKOPS) {
3367 err = ufshcd_urgent_bkops(hba);
57d104c1 3368 if (err < 0)
66ec6d59
SRT
3369 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
3370 __func__, err);
3371 }
3372out:
62694735 3373 pm_runtime_put_sync(hba->dev);
66ec6d59
SRT
3374 return;
3375}
3376
7a3e97b0 3377/**
e8e7f271
SRT
3378 * ufshcd_err_handler - handle UFS errors that require s/w attention
3379 * @work: pointer to work structure
7a3e97b0 3380 */
e8e7f271 3381static void ufshcd_err_handler(struct work_struct *work)
7a3e97b0
SY
3382{
3383 struct ufs_hba *hba;
e8e7f271
SRT
3384 unsigned long flags;
3385 u32 err_xfer = 0;
3386 u32 err_tm = 0;
3387 int err = 0;
3388 int tag;
3389
3390 hba = container_of(work, struct ufs_hba, eh_work);
7a3e97b0 3391
62694735 3392 pm_runtime_get_sync(hba->dev);
1ab27c9c 3393 ufshcd_hold(hba, false);
e8e7f271
SRT
3394
3395 spin_lock_irqsave(hba->host->host_lock, flags);
3396 if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
3397 spin_unlock_irqrestore(hba->host->host_lock, flags);
3398 goto out;
3399 }
3400
3401 hba->ufshcd_state = UFSHCD_STATE_RESET;
3402 ufshcd_set_eh_in_progress(hba);
3403
3404 /* Complete requests that have door-bell cleared by h/w */
3405 ufshcd_transfer_req_compl(hba);
3406 ufshcd_tmc_handler(hba);
3407 spin_unlock_irqrestore(hba->host->host_lock, flags);
3408
3409 /* Clear pending transfer requests */
3410 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
3411 if (ufshcd_clear_cmd(hba, tag))
3412 err_xfer |= 1 << tag;
3413
3414 /* Clear pending task management requests */
3415 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
3416 if (ufshcd_clear_tm_cmd(hba, tag))
3417 err_tm |= 1 << tag;
3418
3419 /* Complete the requests that are cleared by s/w */
3420 spin_lock_irqsave(hba->host->host_lock, flags);
3421 ufshcd_transfer_req_compl(hba);
3422 ufshcd_tmc_handler(hba);
3423 spin_unlock_irqrestore(hba->host->host_lock, flags);
3424
3425 /* Fatal errors need reset */
3426 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
3427 ((hba->saved_err & UIC_ERROR) &&
3428 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
3429 err = ufshcd_reset_and_restore(hba);
3430 if (err) {
3431 dev_err(hba->dev, "%s: reset and restore failed\n",
3432 __func__);
3433 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3434 }
3435 /*
3436 * Inform scsi mid-layer that we did reset and allow to handle
3437 * Unit Attention properly.
3438 */
3439 scsi_report_bus_reset(hba->host, 0);
3440 hba->saved_err = 0;
3441 hba->saved_uic_err = 0;
3442 }
3443 ufshcd_clear_eh_in_progress(hba);
3444
3445out:
3446 scsi_unblock_requests(hba->host);
1ab27c9c 3447 ufshcd_release(hba);
62694735 3448 pm_runtime_put_sync(hba->dev);
7a3e97b0
SY
3449}
3450
3451/**
e8e7f271
SRT
3452 * ufshcd_update_uic_error - check and set fatal UIC error flags.
3453 * @hba: per-adapter instance
7a3e97b0 3454 */
e8e7f271 3455static void ufshcd_update_uic_error(struct ufs_hba *hba)
7a3e97b0
SY
3456{
3457 u32 reg;
3458
e8e7f271
SRT
3459 /* PA_INIT_ERROR is fatal and needs UIC reset */
3460 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
3461 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
3462 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
3463
3464 /* UIC NL/TL/DME errors needs software retry */
3465 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
3466 if (reg)
3467 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
3468
3469 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
3470 if (reg)
3471 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
3472
3473 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
3474 if (reg)
3475 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
3476
3477 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
3478 __func__, hba->uic_error);
3479}
3480
3481/**
3482 * ufshcd_check_errors - Check for errors that need s/w attention
3483 * @hba: per-adapter instance
3484 */
3485static void ufshcd_check_errors(struct ufs_hba *hba)
3486{
3487 bool queue_eh_work = false;
3488
7a3e97b0 3489 if (hba->errors & INT_FATAL_ERRORS)
e8e7f271 3490 queue_eh_work = true;
7a3e97b0
SY
3491
3492 if (hba->errors & UIC_ERROR) {
e8e7f271
SRT
3493 hba->uic_error = 0;
3494 ufshcd_update_uic_error(hba);
3495 if (hba->uic_error)
3496 queue_eh_work = true;
7a3e97b0 3497 }
e8e7f271
SRT
3498
3499 if (queue_eh_work) {
3500 /* handle fatal errors only when link is functional */
3501 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
3502 /* block commands from scsi mid-layer */
3503 scsi_block_requests(hba->host);
3504
3505 /* transfer error masks to sticky bits */
3506 hba->saved_err |= hba->errors;
3507 hba->saved_uic_err |= hba->uic_error;
3508
3509 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3510 schedule_work(&hba->eh_work);
3511 }
3441da7d 3512 }
e8e7f271
SRT
3513 /*
3514 * if (!queue_eh_work) -
3515 * Other errors are either non-fatal errors from which the host
3516 * recovers by itself without s/w intervention, or errors that
3517 * will be handled by the SCSI core layer.
3518 */
7a3e97b0
SY
3519}
3520
3521/**
3522 * ufshcd_tmc_handler - handle task management function completion
3523 * @hba: per adapter instance
3524 */
3525static void ufshcd_tmc_handler(struct ufs_hba *hba)
3526{
3527 u32 tm_doorbell;
3528
b873a275 3529 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
7a3e97b0 3530 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
e2933132 3531 wake_up(&hba->tm_wq);
7a3e97b0
SY
3532}
3533
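/*
 * Illustrative note: the XOR above marks exactly the task slots that were
 * outstanding but whose doorbell bit has been cleared by the controller,
 * i.e. the completed TM requests. For example, with
 *
 *	outstanding_tasks = 0b0110 and tm_doorbell = 0b0100
 *
 * the result 0b0010 flags slot 1 as complete and wakes its waiter in
 * ufshcd_issue_tm_cmd().
 */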
3534/**
3535 * ufshcd_sl_intr - Interrupt service routine
3536 * @hba: per adapter instance
3537 * @intr_status: contains interrupts generated by the controller
3538 */
3539static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
3540{
3541 hba->errors = UFSHCD_ERROR_MASK & intr_status;
3542 if (hba->errors)
e8e7f271 3543 ufshcd_check_errors(hba);
7a3e97b0 3544
53b3d9c3
SJ
3545 if (intr_status & UFSHCD_UIC_MASK)
3546 ufshcd_uic_cmd_compl(hba, intr_status);
7a3e97b0
SY
3547
3548 if (intr_status & UTP_TASK_REQ_COMPL)
3549 ufshcd_tmc_handler(hba);
3550
3551 if (intr_status & UTP_TRANSFER_REQ_COMPL)
3552 ufshcd_transfer_req_compl(hba);
3553}
3554
3555/**
3556 * ufshcd_intr - Main interrupt service routine
3557 * @irq: irq number
3558 * @__hba: pointer to adapter instance
3559 *
3560 * Returns IRQ_HANDLED - If interrupt is valid
3561 * IRQ_NONE - If invalid interrupt
3562 */
3563static irqreturn_t ufshcd_intr(int irq, void *__hba)
3564{
3565 u32 intr_status;
3566 irqreturn_t retval = IRQ_NONE;
3567 struct ufs_hba *hba = __hba;
3568
3569 spin_lock(hba->host->host_lock);
b873a275 3570 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7a3e97b0
SY
3571
3572 if (intr_status) {
261ea452 3573 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
7a3e97b0 3574 ufshcd_sl_intr(hba, intr_status);
7a3e97b0
SY
3575 retval = IRQ_HANDLED;
3576 }
3577 spin_unlock(hba->host->host_lock);
3578 return retval;
3579}
3580
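/*
 * Illustrative sketch only: ufshcd_intr() is registered as a shared
 * interrupt handler later in ufshcd_init(), roughly along the lines of:
 *
 *	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
 *			       UFSHCD, hba);
 *	if (err)
 *		dev_err(hba->dev, "request irq failed\n");
 *
 * Since the line can be shared, the handler above reads
 * REG_INTERRUPT_STATUS, returns IRQ_NONE when nothing is pending, and
 * clears the status bits it handled. The exact registration call is not
 * shown in this hunk; treat the snippet as a sketch.
 */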
e2933132
SRT
3581static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
3582{
3583 int err = 0;
3584 u32 mask = 1 << tag;
3585 unsigned long flags;
3586
3587 if (!test_bit(tag, &hba->outstanding_tasks))
3588 goto out;
3589
3590 spin_lock_irqsave(hba->host->host_lock, flags);
3591 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
3592 spin_unlock_irqrestore(hba->host->host_lock, flags);
3593
3594 /* poll for max. 1 sec to clear door bell register by h/w */
3595 err = ufshcd_wait_for_register(hba,
3596 REG_UTP_TASK_REQ_DOOR_BELL,
3597 mask, 0, 1000, 1000);
3598out:
3599 return err;
3600}
3601
7a3e97b0
SY
3602/**
3603 * ufshcd_issue_tm_cmd - issues task management commands to controller
3604 * @hba: per adapter instance
e2933132
SRT
3605 * @lun_id: LUN ID to which TM command is sent
3606 * @task_id: task ID to which the TM command is applicable
3607 * @tm_function: task management function opcode
3608 * @tm_response: task management service response return value
7a3e97b0 3609 *
e2933132 3610 * Returns non-zero value on error, zero on success.
7a3e97b0 3611 */
e2933132
SRT
3612static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
3613 u8 tm_function, u8 *tm_response)
7a3e97b0
SY
3614{
3615 struct utp_task_req_desc *task_req_descp;
3616 struct utp_upiu_task_req *task_req_upiup;
3617 struct Scsi_Host *host;
3618 unsigned long flags;
e2933132 3619 int free_slot;
7a3e97b0 3620 int err;
e2933132 3621 int task_tag;
7a3e97b0
SY
3622
3623 host = hba->host;
3624
e2933132
SRT
3625 /*
3626 * Get free slot, sleep if slots are unavailable.
3627 * Even though we use wait_event() which sleeps indefinitely,
3628 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
3629 */
3630 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
1ab27c9c 3631 ufshcd_hold(hba, false);
7a3e97b0 3632
e2933132 3633 spin_lock_irqsave(host->host_lock, flags);
7a3e97b0
SY
3634 task_req_descp = hba->utmrdl_base_addr;
3635 task_req_descp += free_slot;
3636
3637 /* Configure task request descriptor */
3638 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
3639 task_req_descp->header.dword_2 =
3640 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
3641
3642 /* Configure task request UPIU */
3643 task_req_upiup =
3644 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
e2933132 3645 task_tag = hba->nutrs + free_slot;
7a3e97b0 3646 task_req_upiup->header.dword_0 =
5a0b0cb9 3647 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
e2933132 3648 lun_id, task_tag);
7a3e97b0 3649 task_req_upiup->header.dword_1 =
5a0b0cb9 3650 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
0ce147d4
SJ
3651 /*
3652 * The host shall provide the same value for LUN field in the basic
3653 * header and for Input Parameter.
3654 */
e2933132
SRT
3655 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
3656 task_req_upiup->input_param2 = cpu_to_be32(task_id);
7a3e97b0
SY
3657
3658 /* send command to the controller */
3659 __set_bit(free_slot, &hba->outstanding_tasks);
b873a275 3660 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
7a3e97b0
SY
3661
3662 spin_unlock_irqrestore(host->host_lock, flags);
3663
3664 /* wait until the task management command is completed */
e2933132
SRT
3665 err = wait_event_timeout(hba->tm_wq,
3666 test_bit(free_slot, &hba->tm_condition),
3667 msecs_to_jiffies(TM_CMD_TIMEOUT));
7a3e97b0 3668 if (!err) {
e2933132
SRT
3669 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
3670 __func__, tm_function);
3671 if (ufshcd_clear_tm_cmd(hba, free_slot))
3672 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
3673 __func__, free_slot);
3674 err = -ETIMEDOUT;
3675 } else {
3676 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
7a3e97b0 3677 }
e2933132 3678
7a3e97b0 3679 clear_bit(free_slot, &hba->tm_condition);
e2933132
SRT
3680 ufshcd_put_tm_slot(hba, free_slot);
3681 wake_up(&hba->tm_tag_wq);
3682
1ab27c9c 3683 ufshcd_release(hba);
7a3e97b0
SY
3684 return err;
3685}
3686
3687/**
3441da7d
SRT
3688 * ufshcd_eh_device_reset_handler - device reset handler registered to
3689 * scsi layer.
7a3e97b0
SY
3690 * @cmd: SCSI command pointer
3691 *
3692 * Returns SUCCESS/FAILED
3693 */
3441da7d 3694static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7a3e97b0
SY
3695{
3696 struct Scsi_Host *host;
3697 struct ufs_hba *hba;
3698 unsigned int tag;
3699 u32 pos;
3700 int err;
e2933132
SRT
3701 u8 resp = 0xF;
3702 struct ufshcd_lrb *lrbp;
3441da7d 3703 unsigned long flags;
7a3e97b0
SY
3704
3705 host = cmd->device->host;
3706 hba = shost_priv(host);
3707 tag = cmd->request->tag;
3708
e2933132
SRT
3709 lrbp = &hba->lrb[tag];
3710 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
3711 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
3441da7d
SRT
3712 if (!err)
3713 err = resp;
7a3e97b0 3714 goto out;
e2933132 3715 }
7a3e97b0 3716
3441da7d
SRT
3717 /* clear the commands that were pending for corresponding LUN */
3718 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
3719 if (hba->lrb[pos].lun == lrbp->lun) {
3720 err = ufshcd_clear_cmd(hba, pos);
3721 if (err)
3722 break;
7a3e97b0 3723 }
3441da7d
SRT
3724 }
3725 spin_lock_irqsave(host->host_lock, flags);
3726 ufshcd_transfer_req_compl(hba);
3727 spin_unlock_irqrestore(host->host_lock, flags);
7a3e97b0 3728out:
3441da7d
SRT
3729 if (!err) {
3730 err = SUCCESS;
3731 } else {
3732 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3733 err = FAILED;
3734 }
7a3e97b0
SY
3735 return err;
3736}
3737
7a3e97b0
SY
3738/**
3739 * ufshcd_abort - abort a specific command
3740 * @cmd: SCSI command pointer
3741 *
f20810d8
SRT
3742 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
3743 * management command, and in the host controller by clearing the door-bell
3744 * register. There can be a race where the controller sends the command to the
3745 * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK
3746 * to check if the command is really issued and then try to abort it.
3747 *
7a3e97b0
SY
3748 * Returns SUCCESS/FAILED
3749 */
3750static int ufshcd_abort(struct scsi_cmnd *cmd)
3751{
3752 struct Scsi_Host *host;
3753 struct ufs_hba *hba;
3754 unsigned long flags;
3755 unsigned int tag;
f20810d8
SRT
3756 int err = 0;
3757 int poll_cnt;
e2933132
SRT
3758 u8 resp = 0xF;
3759 struct ufshcd_lrb *lrbp;
e9d501b1 3760 u32 reg;
7a3e97b0
SY
3761
3762 host = cmd->device->host;
3763 hba = shost_priv(host);
3764 tag = cmd->request->tag;
3765
1ab27c9c 3766 ufshcd_hold(hba, false);
f20810d8
SRT
3767 /* If command is already aborted/completed, return SUCCESS */
3768 if (!(test_bit(tag, &hba->outstanding_reqs)))
3769 goto out;
7a3e97b0 3770
e9d501b1
DR
3771 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3772 if (!(reg & (1 << tag))) {
3773 dev_err(hba->dev,
3774 "%s: cmd was completed, but without a notifying intr, tag = %d",
3775 __func__, tag);
3776 }
3777
f20810d8
SRT
3778 lrbp = &hba->lrb[tag];
3779 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
3780 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3781 UFS_QUERY_TASK, &resp);
3782 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
3783 /* cmd pending in the device */
3784 break;
3785 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
f20810d8
SRT
3786 /*
3787 * cmd not pending in the device, check if it is
3788 * in transition.
3789 */
3790 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3791 if (reg & (1 << tag)) {
3792 /* sleep for max. 200us to stabilize */
3793 usleep_range(100, 200);
3794 continue;
3795 }
3796 /* command completed already */
3797 goto out;
3798 } else {
3799 if (!err)
3800 err = resp; /* service response error */
3801 goto out;
3802 }
3803 }
3804
3805 if (!poll_cnt) {
3806 err = -EBUSY;
7a3e97b0
SY
3807 goto out;
3808 }
7a3e97b0 3809
e2933132
SRT
3810 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
3811 UFS_ABORT_TASK, &resp);
3812 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
f20810d8
SRT
3813 if (!err)
3814 err = resp; /* service response error */
7a3e97b0 3815 goto out;
e2933132 3816 }
7a3e97b0 3817
f20810d8
SRT
3818 err = ufshcd_clear_cmd(hba, tag);
3819 if (err)
3820 goto out;
3821
7a3e97b0
SY
3822 scsi_dma_unmap(cmd);
3823
3824 spin_lock_irqsave(host->host_lock, flags);
7a3e97b0
SY
3825 __clear_bit(tag, &hba->outstanding_reqs);
3826 hba->lrb[tag].cmd = NULL;
3827 spin_unlock_irqrestore(host->host_lock, flags);
5a0b0cb9
SRT
3828
3829 clear_bit_unlock(tag, &hba->lrb_in_use);
3830 wake_up(&hba->dev_cmd.tag_wq);
1ab27c9c 3831
7a3e97b0 3832out:
f20810d8
SRT
3833 if (!err) {
3834 err = SUCCESS;
3835 } else {
3836 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
3837 err = FAILED;
3838 }
3839
1ab27c9c
ST
3840 /*
3841 * This ufshcd_release() corresponds to the original scsi cmd that got
3842 * aborted here (as we won't get any IRQ for it).
3843 */
3844 ufshcd_release(hba);
7a3e97b0
SY
3845 return err;
3846}
3847
3441da7d
SRT
3848/**
3849 * ufshcd_host_reset_and_restore - reset and restore host controller
3850 * @hba: per-adapter instance
3851 *
3852 * Note that host controller reset may issue DME_RESET to
3853 * local and remote (device) Uni-Pro stack and the attributes
3854 * are reset to default state.
3855 *
3856 * Returns zero on success, non-zero on failure
3857 */
3858static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
3859{
3860 int err;
3441da7d
SRT
3861 unsigned long flags;
3862
3863 /* Reset the host controller */
3864 spin_lock_irqsave(hba->host->host_lock, flags);
3865 ufshcd_hba_stop(hba);
3866 spin_unlock_irqrestore(hba->host->host_lock, flags);
3867
3868 err = ufshcd_hba_enable(hba);
3869 if (err)
3870 goto out;
3871
3872 /* Establish the link again and restore the device */
1d337ec2
SRT
3873 err = ufshcd_probe_hba(hba);
3874
3875 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3441da7d
SRT
3876 err = -EIO;
3877out:
3878 if (err)
3879 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
3880
3881 return err;
3882}
3883
3884/**
3885 * ufshcd_reset_and_restore - reset and re-initialize host/device
3886 * @hba: per-adapter instance
3887 *
3888 * Reset and recover device, host and re-establish link. This
3889 * is helpful to recover the communication in fatal error conditions.
3890 *
3891 * Returns zero on success, non-zero on failure
3892 */
3893static int ufshcd_reset_and_restore(struct ufs_hba *hba)
3894{
3895 int err = 0;
3896 unsigned long flags;
1d337ec2 3897 int retries = MAX_HOST_RESET_RETRIES;
3441da7d 3898
1d337ec2
SRT
3899 do {
3900 err = ufshcd_host_reset_and_restore(hba);
3901 } while (err && --retries);
3441da7d
SRT
3902
3903 /*
3904 * After reset the door-bell might be cleared, complete
3905 * outstanding requests in s/w here.
3906 */
3907 spin_lock_irqsave(hba->host->host_lock, flags);
3908 ufshcd_transfer_req_compl(hba);
3909 ufshcd_tmc_handler(hba);
3910 spin_unlock_irqrestore(hba->host->host_lock, flags);
3911
3912 return err;
3913}
3914
3915/**
3916 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
3917 * @cmd - SCSI command pointer
3918 *
3919 * Returns SUCCESS/FAILED
3920 */
3921static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
3922{
3923 int err;
3924 unsigned long flags;
3925 struct ufs_hba *hba;
3926
3927 hba = shost_priv(cmd->device->host);
3928
1ab27c9c 3929 ufshcd_hold(hba, false);
3441da7d
SRT
3930 /*
3931 * Check if there is any race with fatal error handling.
3932 * If so, wait for it to complete. Even though fatal error
3933 * handling does reset and restore in some cases, don't assume
3934 * anything out of it. We are just avoiding the race here.
3935 */
3936 do {
3937 spin_lock_irqsave(hba->host->host_lock, flags);
e8e7f271 3938 if (!(work_pending(&hba->eh_work) ||
3441da7d
SRT
3939 hba->ufshcd_state == UFSHCD_STATE_RESET))
3940 break;
3941 spin_unlock_irqrestore(hba->host->host_lock, flags);
3942 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
e8e7f271 3943 flush_work(&hba->eh_work);
3441da7d
SRT
3944 } while (1);
3945
3946 hba->ufshcd_state = UFSHCD_STATE_RESET;
3947 ufshcd_set_eh_in_progress(hba);
3948 spin_unlock_irqrestore(hba->host->host_lock, flags);
3949
3950 err = ufshcd_reset_and_restore(hba);
3951
3952 spin_lock_irqsave(hba->host->host_lock, flags);
3953 if (!err) {
3954 err = SUCCESS;
3955 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
3956 } else {
3957 err = FAILED;
3958 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3959 }
3960 ufshcd_clear_eh_in_progress(hba);
3961 spin_unlock_irqrestore(hba->host->host_lock, flags);
3962
1ab27c9c 3963 ufshcd_release(hba);
3441da7d
SRT
3964 return err;
3965}
3966
3a4bf06d
YG
3967/**
3968 * ufshcd_get_max_icc_level - calculate the ICC level
3969 * @sup_curr_uA: max. current supported by the regulator
3970 * @start_scan: row at the desc table to start scan from
3971 * @buff: power descriptor buffer
3972 *
3973 * Returns calculated max ICC level for specific regulator
3974 */
3975static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
3976{
3977 int i;
3978 int curr_uA;
3979 u16 data;
3980 u16 unit;
3981
3982 for (i = start_scan; i >= 0; i--) {
3983 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
3984 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
3985 ATTR_ICC_LVL_UNIT_OFFSET;
3986 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
3987 switch (unit) {
3988 case UFSHCD_NANO_AMP:
3989 curr_uA = curr_uA / 1000;
3990 break;
3991 case UFSHCD_MILI_AMP:
3992 curr_uA = curr_uA * 1000;
3993 break;
3994 case UFSHCD_AMP:
3995 curr_uA = curr_uA * 1000 * 1000;
3996 break;
3997 case UFSHCD_MICRO_AMP:
3998 default:
3999 break;
4000 }
4001 if (sup_curr_uA >= curr_uA)
4002 break;
4003 }
4004 if (i < 0) {
4005 i = 0;
4006 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
4007 }
4008
4009 return (u32)i;
4010}
4011
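/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * each power descriptor entry scanned above is a big-endian 16-bit value
 * with the unit in the top bits and the current value in the low bits.
 * Decoding a single entry into microamps would look roughly like this:
 */
static int ufshcd_icc_entry_to_uA(const char *buff, u32 idx)
{
	u16 data = be16_to_cpu(*((u16 *)(buff + 2 * idx)));
	u16 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET;
	int curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;

	switch (unit) {
	case UFSHCD_NANO_AMP:
		return curr_uA / 1000;
	case UFSHCD_MILI_AMP:
		return curr_uA * 1000;
	case UFSHCD_AMP:
		return curr_uA * 1000 * 1000;
	case UFSHCD_MICRO_AMP:
	default:
		return curr_uA;
	}
}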
4012/**
4013 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
4014 * ICC level. In case regulators are not initialized we'll return 0.
4015 * @hba: per-adapter instance
4016 * @desc_buf: power descriptor buffer to extract ICC levels from.
4017 * @len: length of desc_buf
4018 *
4019 * Returns calculated ICC level
4020 */
4021static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
4022 u8 *desc_buf, int len)
4023{
4024 u32 icc_level = 0;
4025
4026 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
4027 !hba->vreg_info.vccq2) {
4028 dev_err(hba->dev,
4029 "%s: Regulator capability was not set, actvIccLevel=%d",
4030 __func__, icc_level);
4031 goto out;
4032 }
4033
4034 if (hba->vreg_info.vcc)
4035 icc_level = ufshcd_get_max_icc_level(
4036 hba->vreg_info.vcc->max_uA,
4037 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
4038 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
4039
4040 if (hba->vreg_info.vccq)
4041 icc_level = ufshcd_get_max_icc_level(
4042 hba->vreg_info.vccq->max_uA,
4043 icc_level,
4044 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
4045
4046 if (hba->vreg_info.vccq2)
4047 icc_level = ufshcd_get_max_icc_level(
4048 hba->vreg_info.vccq2->max_uA,
4049 icc_level,
4050 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4051out:
4052 return icc_level;
4053}
4054
4055static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4056{
4057 int ret;
4058 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
4059 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
4060
4061 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
4062 if (ret) {
4063 dev_err(hba->dev,
4064 "%s: Failed reading power descriptor.len = %d ret = %d",
4065 __func__, buff_len, ret);
4066 return;
4067 }
4068
4069 hba->init_prefetch_data.icc_level =
4070 ufshcd_find_max_sup_active_icc_level(hba,
4071 desc_buf, buff_len);
4072 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4073 __func__, hba->init_prefetch_data.icc_level);
4074
4075 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4076 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4077 &hba->init_prefetch_data.icc_level);
4078
4079 if (ret)
4080 dev_err(hba->dev,
4081 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
4082 __func__, hba->init_prefetch_data.icc_level, ret);
4083
4084}
4085
2a8fa600
SJ
4086/**
4087 * ufshcd_scsi_add_wlus - Adds required W-LUs
4088 * @hba: per-adapter instance
4089 *
4090 * UFS device specification requires the UFS devices to support 4 well known
4091 * logical units:
4092 * "REPORT_LUNS" (address: 01h)
4093 * "UFS Device" (address: 50h)
4094 * "RPMB" (address: 44h)
4095 * "BOOT" (address: 30h)
4096 * UFS device's power management needs to be controlled by "POWER CONDITION"
4097 * field of SSU (START STOP UNIT) command. But this "power condition" field
4098 * will take effect only when it's sent to the "UFS device" well known logical
4099 * unit, hence we require the scsi_device instance to represent this logical unit
4100 * in order for the UFS host driver to send the SSU command for power management.
4101 *
4102 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
4103 * Block) LU so that a user space process can control this LU. User space may
4104 * also want to have access to BOOT LU.
4105 *
4106 * This function adds scsi device instances for each of the well known LUs
4107 * (except "REPORT LUNS" LU).
4108 *
4109 * Returns zero on success (all required W-LUs are added successfully),
4110 * non-zero error value on failure (if failed to add any of the required W-LU).
4111 */
4112static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4113{
4114 int ret = 0;
7c48bfd0
AM
4115 struct scsi_device *sdev_rpmb;
4116 struct scsi_device *sdev_boot;
2a8fa600
SJ
4117
4118 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4119 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4120 if (IS_ERR(hba->sdev_ufs_device)) {
4121 ret = PTR_ERR(hba->sdev_ufs_device);
4122 hba->sdev_ufs_device = NULL;
4123 goto out;
4124 }
7c48bfd0 4125 scsi_device_put(hba->sdev_ufs_device);
2a8fa600 4126
7c48bfd0 4127 sdev_boot = __scsi_add_device(hba->host, 0, 0,
2a8fa600 4128 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
7c48bfd0
AM
4129 if (IS_ERR(sdev_boot)) {
4130 ret = PTR_ERR(sdev_boot);
2a8fa600
SJ
4131 goto remove_sdev_ufs_device;
4132 }
7c48bfd0 4133 scsi_device_put(sdev_boot);
2a8fa600 4134
7c48bfd0 4135 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
2a8fa600 4136 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
7c48bfd0
AM
4137 if (IS_ERR(sdev_rpmb)) {
4138 ret = PTR_ERR(sdev_rpmb);
2a8fa600
SJ
4139 goto remove_sdev_boot;
4140 }
7c48bfd0 4141 scsi_device_put(sdev_rpmb);
2a8fa600
SJ
4142 goto out;
4143
4144remove_sdev_boot:
7c48bfd0 4145 scsi_remove_device(sdev_boot);
2a8fa600
SJ
4146remove_sdev_ufs_device:
4147 scsi_remove_device(hba->sdev_ufs_device);
4148out:
4149 return ret;
4150}
4151
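/*
 * Illustrative sketch only: ufshcd_upiu_wlun_to_scsi_wlun() is defined in
 * ufshcd.h and is assumed here to fold the UPIU well known LUN ID into the
 * SCSI well known LUN range, roughly:
 *
 *	static inline u16 upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
 *	{
 *		return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
 *	}
 *
 * so, for example, the "UFS Device" W-LU at 50h becomes addressable by the
 * SCSI midlayer as a well known LUN. The body above is a sketch, not a
 * verbatim copy of the header.
 */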
6ccf44fe 4152/**
1d337ec2
SRT
4153 * ufshcd_probe_hba - probe hba to detect device and initialize
4154 * @hba: per-adapter instance
4155 *
4156 * Execute link-startup and verify device initialization
6ccf44fe 4157 */
1d337ec2 4158static int ufshcd_probe_hba(struct ufs_hba *hba)
6ccf44fe 4159{
6ccf44fe
SJ
4160 int ret;
4161
4162 ret = ufshcd_link_startup(hba);
5a0b0cb9
SRT
4163 if (ret)
4164 goto out;
4165
5064636c
YG
4166 ufshcd_init_pwr_info(hba);
4167
57d104c1
SJ
4168 /* UniPro link is active now */
4169 ufshcd_set_link_active(hba);
d3e89bac 4170
5a0b0cb9
SRT
4171 ret = ufshcd_verify_dev_init(hba);
4172 if (ret)
4173 goto out;
68078d5c
DR
4174
4175 ret = ufshcd_complete_dev_init(hba);
4176 if (ret)
4177 goto out;
5a0b0cb9 4178
57d104c1
SJ
4179 /* UFS device is also active now */
4180 ufshcd_set_ufs_dev_active(hba);
66ec6d59 4181 ufshcd_force_reset_auto_bkops(hba);
3441da7d 4182 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
57d104c1
SJ
4183 hba->wlun_dev_clr_ua = true;
4184
7eb584db
DR
4185 if (ufshcd_get_max_pwr_mode(hba)) {
4186 dev_err(hba->dev,
4187 "%s: Failed getting max supported power mode\n",
4188 __func__);
4189 } else {
4190 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
4191 if (ret)
4192 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
4193 __func__, ret);
4194 }
57d104c1
SJ
4195
4196 /*
4197 * If we are in error handling context or in power management callbacks
4198 * context, no need to scan the host
4199 */
4200 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4201 bool flag;
4202
4203 /* clear any previous UFS device information */
4204 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
4205 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4206 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
4207 hba->dev_info.f_power_on_wp_en = flag;
3441da7d 4208
3a4bf06d
YG
4209 if (!hba->is_init_prefetch)
4210 ufshcd_init_icc_levels(hba);
4211
2a8fa600
SJ
4212 /* Add required well known logical units to scsi mid layer */
4213 if (ufshcd_scsi_add_wlus(hba))
4214 goto out;
4215
3441da7d
SRT
4216 scsi_scan_host(hba->host);
4217 pm_runtime_put_sync(hba->dev);
4218 }
3a4bf06d
YG
4219
4220 if (!hba->is_init_prefetch)
4221 hba->is_init_prefetch = true;
4222
856b3483
ST
4223 /* Resume devfreq after UFS device is detected */
4224 if (ufshcd_is_clkscaling_enabled(hba))
4225 devfreq_resume_device(hba->devfreq);
4226
5a0b0cb9 4227out:
1d337ec2
SRT
4228 /*
4229 * If we failed to initialize the device or the device is not
4230 * present, turn off the power/clocks etc.
4231 */
57d104c1
SJ
4232 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4233 pm_runtime_put_sync(hba->dev);
1d337ec2 4234 ufshcd_hba_exit(hba);
57d104c1 4235 }
1d337ec2
SRT
4236
4237 return ret;
4238}
4239
4240/**
4241 * ufshcd_async_scan - asynchronous execution for probing hba
4242 * @data: data pointer to pass to this function
4243 * @cookie: cookie data
4244 */
4245static void ufshcd_async_scan(void *data, async_cookie_t cookie)
4246{
4247 struct ufs_hba *hba = (struct ufs_hba *)data;
4248
4249 ufshcd_probe_hba(hba);
6ccf44fe
SJ
4250}
4251
7a3e97b0
SY
4252static struct scsi_host_template ufshcd_driver_template = {
4253 .module = THIS_MODULE,
4254 .name = UFSHCD,
4255 .proc_name = UFSHCD,
4256 .queuecommand = ufshcd_queuecommand,
4257 .slave_alloc = ufshcd_slave_alloc,
eeda4749 4258 .slave_configure = ufshcd_slave_configure,
7a3e97b0 4259 .slave_destroy = ufshcd_slave_destroy,
4264fd61 4260 .change_queue_depth = ufshcd_change_queue_depth,
7a3e97b0 4261 .eh_abort_handler = ufshcd_abort,
3441da7d
SRT
4262 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
4263 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7a3e97b0
SY
4264 .this_id = -1,
4265 .sg_tablesize = SG_ALL,
4266 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
4267 .can_queue = UFSHCD_CAN_QUEUE,
1ab27c9c 4268 .max_host_blocked = 1,
2ecb204d 4269 .use_blk_tags = 1,
c40ecc12 4270 .track_queue_depth = 1,
7a3e97b0
SY
4271};
4272
57d104c1
SJ
4273static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4274 int ua)
4275{
7b16a07c 4276 int ret;
57d104c1 4277
7b16a07c
BA
4278 if (!vreg)
4279 return 0;
57d104c1 4280
7b16a07c
BA
4281 ret = regulator_set_load(vreg->reg, ua);
4282 if (ret < 0) {
4283 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
4284 __func__, vreg->name, ua, ret);
57d104c1
SJ
4285 }
4286
4287 return ret;
4288}
4289
4290static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4291 struct ufs_vreg *vreg)
4292{
4293 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4294}
4295
4296static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4297 struct ufs_vreg *vreg)
4298{
4299 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4300}
4301
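/*
 * Illustrative sketch only: UFS_VREG_LPM_LOAD_UA (defined in the UFS
 * headers) is a small idle load hint, while vreg->max_uA is the full
 * operational load. regulator_set_load() lets the regulator framework pick
 * a more efficient operating mode for the lower load, e.g.:
 *
 *	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
 *	ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 *
 * the first on the suspend path and the second on resume, as used by the
 * PM code further down in this file.
 */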
aa497613
SRT
4302static int ufshcd_config_vreg(struct device *dev,
4303 struct ufs_vreg *vreg, bool on)
4304{
4305 int ret = 0;
4306 struct regulator *reg = vreg->reg;
4307 const char *name = vreg->name;
4308 int min_uV, uA_load;
4309
4310 BUG_ON(!vreg);
4311
4312 if (regulator_count_voltages(reg) > 0) {
4313 min_uV = on ? vreg->min_uV : 0;
4314 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
4315 if (ret) {
4316 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
4317 __func__, name, ret);
4318 goto out;
4319 }
4320
4321 uA_load = on ? vreg->max_uA : 0;
57d104c1
SJ
4322 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
4323 if (ret)
aa497613 4324 goto out;
aa497613
SRT
4325 }
4326out:
4327 return ret;
4328}
4329
4330static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
4331{
4332 int ret = 0;
4333
4334 if (!vreg || vreg->enabled)
4335 goto out;
4336
4337 ret = ufshcd_config_vreg(dev, vreg, true);
4338 if (!ret)
4339 ret = regulator_enable(vreg->reg);
4340
4341 if (!ret)
4342 vreg->enabled = true;
4343 else
4344 dev_err(dev, "%s: %s enable failed, err=%d\n",
4345 __func__, vreg->name, ret);
4346out:
4347 return ret;
4348}
4349
4350static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
4351{
4352 int ret = 0;
4353
4354 if (!vreg || !vreg->enabled)
4355 goto out;
4356
4357 ret = regulator_disable(vreg->reg);
4358
4359 if (!ret) {
4360 /* ignore errors on applying disable config */
4361 ufshcd_config_vreg(dev, vreg, false);
4362 vreg->enabled = false;
4363 } else {
4364 dev_err(dev, "%s: %s disable failed, err=%d\n",
4365 __func__, vreg->name, ret);
4366 }
4367out:
4368 return ret;
4369}
4370
4371static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
4372{
4373 int ret = 0;
4374 struct device *dev = hba->dev;
4375 struct ufs_vreg_info *info = &hba->vreg_info;
4376
4377 if (!info)
4378 goto out;
4379
4380 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
4381 if (ret)
4382 goto out;
4383
4384 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
4385 if (ret)
4386 goto out;
4387
4388 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
4389 if (ret)
4390 goto out;
4391
4392out:
4393 if (ret) {
4394 ufshcd_toggle_vreg(dev, info->vccq2, false);
4395 ufshcd_toggle_vreg(dev, info->vccq, false);
4396 ufshcd_toggle_vreg(dev, info->vcc, false);
4397 }
4398 return ret;
4399}
4400
6a771a65
RS
4401static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
4402{
4403 struct ufs_vreg_info *info = &hba->vreg_info;
4404
4405 if (info)
4406 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
4407
4408 return 0;
4409}
4410
aa497613
SRT
4411static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
4412{
4413 int ret = 0;
4414
4415 if (!vreg)
4416 goto out;
4417
4418 vreg->reg = devm_regulator_get(dev, vreg->name);
4419 if (IS_ERR(vreg->reg)) {
4420 ret = PTR_ERR(vreg->reg);
4421 dev_err(dev, "%s: %s get failed, err=%d\n",
4422 __func__, vreg->name, ret);
4423 }
4424out:
4425 return ret;
4426}
4427
4428static int ufshcd_init_vreg(struct ufs_hba *hba)
4429{
4430 int ret = 0;
4431 struct device *dev = hba->dev;
4432 struct ufs_vreg_info *info = &hba->vreg_info;
4433
4434 if (!info)
4435 goto out;
4436
4437 ret = ufshcd_get_vreg(dev, info->vcc);
4438 if (ret)
4439 goto out;
4440
4441 ret = ufshcd_get_vreg(dev, info->vccq);
4442 if (ret)
4443 goto out;
4444
4445 ret = ufshcd_get_vreg(dev, info->vccq2);
4446out:
4447 return ret;
4448}
4449
6a771a65
RS
4450static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
4451{
4452 struct ufs_vreg_info *info = &hba->vreg_info;
4453
4454 if (info)
4455 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
4456
4457 return 0;
4458}
4459
57d104c1
SJ
4460static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4461 bool skip_ref_clk)
c6e79dac
SRT
4462{
4463 int ret = 0;
4464 struct ufs_clk_info *clki;
4465 struct list_head *head = &hba->clk_list_head;
1ab27c9c 4466 unsigned long flags;
c6e79dac
SRT
4467
4468 if (!head || list_empty(head))
4469 goto out;
4470
4471 list_for_each_entry(clki, head, list) {
4472 if (!IS_ERR_OR_NULL(clki->clk)) {
57d104c1
SJ
4473 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
4474 continue;
4475
c6e79dac
SRT
4476 if (on && !clki->enabled) {
4477 ret = clk_prepare_enable(clki->clk);
4478 if (ret) {
4479 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
4480 __func__, clki->name, ret);
4481 goto out;
4482 }
4483 } else if (!on && clki->enabled) {
4484 clk_disable_unprepare(clki->clk);
4485 }
4486 clki->enabled = on;
4487 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
4488 clki->name, on ? "en" : "dis");
4489 }
4490 }
1ab27c9c
ST
4491
4492 if (hba->vops && hba->vops->setup_clocks)
4493 ret = hba->vops->setup_clocks(hba, on);
c6e79dac
SRT
4494out:
4495 if (ret) {
4496 list_for_each_entry(clki, head, list) {
4497 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4498 clk_disable_unprepare(clki->clk);
4499 }
eda910e4 4500 } else if (on) {
1ab27c9c
ST
4501 spin_lock_irqsave(hba->host->host_lock, flags);
4502 hba->clk_gating.state = CLKS_ON;
4503 spin_unlock_irqrestore(hba->host->host_lock, flags);
c6e79dac
SRT
4504 }
4505 return ret;
4506}
4507
57d104c1
SJ
4508static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
4509{
4510 return __ufshcd_setup_clocks(hba, on, false);
4511}
4512
c6e79dac
SRT
4513static int ufshcd_init_clocks(struct ufs_hba *hba)
4514{
4515 int ret = 0;
4516 struct ufs_clk_info *clki;
4517 struct device *dev = hba->dev;
4518 struct list_head *head = &hba->clk_list_head;
4519
4520 if (!head || list_empty(head))
4521 goto out;
4522
4523 list_for_each_entry(clki, head, list) {
4524 if (!clki->name)
4525 continue;
4526
4527 clki->clk = devm_clk_get(dev, clki->name);
4528 if (IS_ERR(clki->clk)) {
4529 ret = PTR_ERR(clki->clk);
4530 dev_err(dev, "%s: %s clk get failed, %d\n",
4531 __func__, clki->name, ret);
4532 goto out;
4533 }
4534
4535 if (clki->max_freq) {
4536 ret = clk_set_rate(clki->clk, clki->max_freq);
4537 if (ret) {
4538 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
4539 __func__, clki->name,
4540 clki->max_freq, ret);
4541 goto out;
4542 }
856b3483 4543 clki->curr_freq = clki->max_freq;
c6e79dac
SRT
4544 }
4545 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
4546 clki->name, clk_get_rate(clki->clk));
4547 }
4548out:
4549 return ret;
4550}
4551
5c0c28a8
SRT
4552static int ufshcd_variant_hba_init(struct ufs_hba *hba)
4553{
4554 int err = 0;
4555
4556 if (!hba->vops)
4557 goto out;
4558
4559 if (hba->vops->init) {
4560 err = hba->vops->init(hba);
4561 if (err)
4562 goto out;
4563 }
4564
5c0c28a8
SRT
4565 if (hba->vops->setup_regulators) {
4566 err = hba->vops->setup_regulators(hba, true);
4567 if (err)
1ab27c9c 4568 goto out_exit;
5c0c28a8
SRT
4569 }
4570
4571 goto out;
4572
5c0c28a8
SRT
4573out_exit:
4574 if (hba->vops->exit)
4575 hba->vops->exit(hba);
4576out:
4577 if (err)
4578 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
4579 __func__, hba->vops ? hba->vops->name : "", err);
4580 return err;
4581}
4582
4583static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
4584{
4585 if (!hba->vops)
4586 return;
4587
4588 if (hba->vops->setup_clocks)
4589 hba->vops->setup_clocks(hba, false);
4590
4591 if (hba->vops->setup_regulators)
4592 hba->vops->setup_regulators(hba, false);
4593
4594 if (hba->vops->exit)
4595 hba->vops->exit(hba);
4596}
4597
aa497613
SRT
4598static int ufshcd_hba_init(struct ufs_hba *hba)
4599{
4600 int err;
4601
6a771a65
RS
4602 /*
4603 * Handle host controller power separately from the UFS device power
4604 * rails as it helps control the UFS host controller power collapse
4605 * easily, which is different from UFS device power collapse.
4606 * Also, enable the host controller power before we go ahead with the
4607 * rest of the initialization here.
4608 */
4609 err = ufshcd_init_hba_vreg(hba);
aa497613
SRT
4610 if (err)
4611 goto out;
4612
6a771a65 4613 err = ufshcd_setup_hba_vreg(hba, true);
aa497613
SRT
4614 if (err)
4615 goto out;
4616
6a771a65
RS
4617 err = ufshcd_init_clocks(hba);
4618 if (err)
4619 goto out_disable_hba_vreg;
4620
4621 err = ufshcd_setup_clocks(hba, true);
4622 if (err)
4623 goto out_disable_hba_vreg;
4624
c6e79dac
SRT
4625 err = ufshcd_init_vreg(hba);
4626 if (err)
4627 goto out_disable_clks;
4628
4629 err = ufshcd_setup_vreg(hba, true);
4630 if (err)
4631 goto out_disable_clks;
4632
aa497613
SRT
4633 err = ufshcd_variant_hba_init(hba);
4634 if (err)
4635 goto out_disable_vreg;
4636
1d337ec2 4637 hba->is_powered = true;
aa497613
SRT
4638 goto out;
4639
4640out_disable_vreg:
4641 ufshcd_setup_vreg(hba, false);
c6e79dac
SRT
4642out_disable_clks:
4643 ufshcd_setup_clocks(hba, false);
6a771a65
RS
4644out_disable_hba_vreg:
4645 ufshcd_setup_hba_vreg(hba, false);
aa497613
SRT
4646out:
4647 return err;
4648}
4649
4650static void ufshcd_hba_exit(struct ufs_hba *hba)
4651{
1d337ec2
SRT
4652 if (hba->is_powered) {
4653 ufshcd_variant_hba_exit(hba);
4654 ufshcd_setup_vreg(hba, false);
4655 ufshcd_setup_clocks(hba, false);
4656 ufshcd_setup_hba_vreg(hba, false);
4657 hba->is_powered = false;
4658 }
aa497613
SRT
4659}
4660
57d104c1
SJ
4661static int
4662ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
4663{
4664 unsigned char cmd[6] = {REQUEST_SENSE,
4665 0,
4666 0,
4667 0,
4668 SCSI_SENSE_BUFFERSIZE,
4669 0};
4670 char *buffer;
4671 int ret;
4672
4673 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4674 if (!buffer) {
4675 ret = -ENOMEM;
4676 goto out;
4677 }
4678
4679 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
4680 SCSI_SENSE_BUFFERSIZE, NULL,
4681 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
4682 if (ret)
4683 pr_err("%s: failed with err %d\n", __func__, ret);
4684
4685 kfree(buffer);
4686out:
4687 return ret;
4688}
4689
4690/**
4691 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
4692 * power mode
4693 * @hba: per adapter instance
4694 * @pwr_mode: device power mode to set
4695 *
4696 * Returns 0 if requested power mode is set successfully
4697 * Returns non-zero if failed to set the requested power mode
4698 */
4699static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4700 enum ufs_dev_pwr_mode pwr_mode)
4701{
4702 unsigned char cmd[6] = { START_STOP };
4703 struct scsi_sense_hdr sshdr;
7c48bfd0
AM
4704 struct scsi_device *sdp;
4705 unsigned long flags;
57d104c1
SJ
4706 int ret;
4707
7c48bfd0
AM
4708 spin_lock_irqsave(hba->host->host_lock, flags);
4709 sdp = hba->sdev_ufs_device;
4710 if (sdp) {
4711 ret = scsi_device_get(sdp);
4712 if (!ret && !scsi_device_online(sdp)) {
4713 ret = -ENODEV;
4714 scsi_device_put(sdp);
4715 }
4716 } else {
4717 ret = -ENODEV;
4718 }
4719 spin_unlock_irqrestore(hba->host->host_lock, flags);
4720
4721 if (ret)
4722 return ret;
57d104c1
SJ
4723
4724 /*
4725 * If scsi commands fail, the scsi mid-layer schedules scsi error-
4726 * handling, which would wait for host to be resumed. Since we know
4727 * we are functional while we are here, skip host resume in error
4728 * handling context.
4729 */
4730 hba->host->eh_noresume = 1;
4731 if (hba->wlun_dev_clr_ua) {
4732 ret = ufshcd_send_request_sense(hba, sdp);
4733 if (ret)
4734 goto out;
4735 /* Unit attention condition is cleared now */
4736 hba->wlun_dev_clr_ua = false;
4737 }
4738
4739 cmd[4] = pwr_mode << 4;
4740
4741 /*
4742 * This function is generally called from the power management
4743 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
4744 * already suspended children.
4745 */
4746 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
4747 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
4748 if (ret) {
4749 sdev_printk(KERN_WARNING, sdp,
ef61329d
HR
4750 "START_STOP failed for power mode: %d, result %x\n",
4751 pwr_mode, ret);
21045519
HR
4752 if (driver_byte(ret) & DRIVER_SENSE)
4753 scsi_print_sense_hdr(sdp, NULL, &sshdr);
57d104c1
SJ
4754 }
4755
4756 if (!ret)
4757 hba->curr_dev_pwr_mode = pwr_mode;
4758out:
7c48bfd0 4759 scsi_device_put(sdp);
57d104c1
SJ
4760 hba->host->eh_noresume = 0;
4761 return ret;
4762}
4763
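/*
 * Illustrative sketch only: the START STOP UNIT CDB built above carries the
 * requested UFS power mode in the POWER CONDITION field (byte 4, bits 7:4).
 * Assuming UFS_SLEEP_PWR_MODE has the value 2 (as defined in ufshcd.h), the
 * 6-byte CDB for entering sleep would look like:
 *
 *	unsigned char cmd[6] = { START_STOP, 0, 0, 0, 0x20, 0 };
 *
 * where 0x20 == UFS_SLEEP_PWR_MODE << 4, i.e. cmd[4] = pwr_mode << 4
 * exactly as done in the function above.
 */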
4764static int ufshcd_link_state_transition(struct ufs_hba *hba,
4765 enum uic_link_state req_link_state,
4766 int check_for_bkops)
4767{
4768 int ret = 0;
4769
4770 if (req_link_state == hba->uic_link_state)
4771 return 0;
4772
4773 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
4774 ret = ufshcd_uic_hibern8_enter(hba);
4775 if (!ret)
4776 ufshcd_set_link_hibern8(hba);
4777 else
4778 goto out;
4779 }
4780 /*
4781 * If autobkops is enabled, link can't be turned off because
4782 * turning off the link would also turn off the device.
4783 */
4784 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
4785 (!check_for_bkops || (check_for_bkops &&
4786 !hba->auto_bkops_enabled))) {
4787 /*
4788 * Change controller state to "reset state" which
4789 * should also put the link in off/reset state
4790 */
4791 ufshcd_hba_stop(hba);
4792 /*
4793 * TODO: Check if we need any delay to make sure that
4794 * controller is reset
4795 */
4796 ufshcd_set_link_off(hba);
4797 }
4798
4799out:
4800 return ret;
4801}
4802
4803static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
4804{
4805 /*
4806 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
4807 * save some power.
4808 *
4809 * If UFS device and link are in OFF state, all power supplies (VCC,
4810 * VCCQ, VCCQ2) can be turned off if power on write protect is not
4811 * required. If UFS link is inactive (Hibern8 or OFF state) and device
4812 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
4813 *
4814 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
4815 * in low power state which would save some power.
4816 */
4817 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4818 !hba->dev_info.is_lu_power_on_wp) {
4819 ufshcd_setup_vreg(hba, false);
4820 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4821 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4822 if (!ufshcd_is_link_active(hba)) {
4823 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4824 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
4825 }
4826 }
4827}
4828
4829static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
4830{
4831 int ret = 0;
4832
4833 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4834 !hba->dev_info.is_lu_power_on_wp) {
4835 ret = ufshcd_setup_vreg(hba, true);
4836 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4837 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
4838 if (!ret && !ufshcd_is_link_active(hba)) {
4839 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
4840 if (ret)
4841 goto vcc_disable;
4842 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
4843 if (ret)
4844 goto vccq_lpm;
4845 }
4846 }
4847 goto out;
4848
4849vccq_lpm:
4850 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4851vcc_disable:
4852 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4853out:
4854 return ret;
4855}
4856
4857static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
4858{
4859 if (ufshcd_is_link_off(hba))
4860 ufshcd_setup_hba_vreg(hba, false);
4861}
4862
4863static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
4864{
4865 if (ufshcd_is_link_off(hba))
4866 ufshcd_setup_hba_vreg(hba, true);
4867}
4868
7a3e97b0 4869/**
57d104c1 4870 * ufshcd_suspend - helper function for suspend operations
3b1d0580 4871 * @hba: per adapter instance
57d104c1
SJ
4872 * @pm_op: desired low power operation type
4873 *
4874 * This function will try to put the UFS device and link into low power
4875 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
4876 * (System PM level).
4877 *
4878 * If this function is called during shutdown, it will make sure that
4879 * both UFS device and UFS link are powered off.
7a3e97b0 4880 *
57d104c1
SJ
4881 * NOTE: UFS device & link must be active before we enter in this function.
4882 *
4883 * Returns 0 for success and non-zero for failure
7a3e97b0 4884 */
57d104c1 4885static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 4886{
57d104c1
SJ
4887 int ret = 0;
4888 enum ufs_pm_level pm_lvl;
4889 enum ufs_dev_pwr_mode req_dev_pwr_mode;
4890 enum uic_link_state req_link_state;
4891
4892 hba->pm_op_in_progress = 1;
4893 if (!ufshcd_is_shutdown_pm(pm_op)) {
4894 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
4895 hba->rpm_lvl : hba->spm_lvl;
4896 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
4897 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
4898 } else {
4899 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
4900 req_link_state = UIC_LINK_OFF_STATE;
4901 }
4902
7a3e97b0 4903 /*
57d104c1
SJ
4904 * If we can't transition into any of the low power modes
4905 * just gate the clocks.
7a3e97b0 4906 */
1ab27c9c
ST
4907 ufshcd_hold(hba, false);
4908 hba->clk_gating.is_suspended = true;
4909
57d104c1
SJ
4910 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
4911 req_link_state == UIC_LINK_ACTIVE_STATE) {
4912 goto disable_clks;
4913 }
7a3e97b0 4914
57d104c1
SJ
4915 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
4916 (req_link_state == hba->uic_link_state))
4917 goto out;
4918
4919 /* UFS device & link must be active before we enter in this function */
4920 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
4921 ret = -EINVAL;
4922 goto out;
4923 }
4924
4925 if (ufshcd_is_runtime_pm(pm_op)) {
374a246e
SJ
4926 if (ufshcd_can_autobkops_during_suspend(hba)) {
4927 /*
4928 * The device is idle with no requests in the queue,
4929 * allow background operations if bkops status shows
4930 * that performance might be impacted.
4931 */
4932 ret = ufshcd_urgent_bkops(hba);
4933 if (ret)
4934 goto enable_gating;
4935 } else {
4936 /* make sure that auto bkops is disabled */
4937 ufshcd_disable_auto_bkops(hba);
4938 }
57d104c1
SJ
4939 }
4940
4941 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
4942 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
4943 !ufshcd_is_runtime_pm(pm_op))) {
4944 /* ensure that bkops is disabled */
4945 ufshcd_disable_auto_bkops(hba);
4946 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
4947 if (ret)
1ab27c9c 4948 goto enable_gating;
57d104c1
SJ
4949 }
4950
4951 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
4952 if (ret)
4953 goto set_dev_active;
4954
4955 ufshcd_vreg_set_lpm(hba);
4956
4957disable_clks:
856b3483
ST
4958 /*
4959 * The clock scaling needs access to controller registers. Hence, wait
4960 * for pending clock scaling work to be done before clocks are
4961 * turned off.
4962 */
4963 if (ufshcd_is_clkscaling_enabled(hba)) {
4964 devfreq_suspend_device(hba->devfreq);
4965 hba->clk_scaling.window_start_t = 0;
4966 }
57d104c1
SJ
4967 /*
4968 * Call vendor specific suspend callback. As these callbacks may access
4969 * vendor specific host controller register space call them before the
4970 * host clocks are ON.
4971 */
4972 if (hba->vops && hba->vops->suspend) {
4973 ret = hba->vops->suspend(hba, pm_op);
4974 if (ret)
4975 goto set_link_active;
4976 }
4977
4978 if (hba->vops && hba->vops->setup_clocks) {
4979 ret = hba->vops->setup_clocks(hba, false);
4980 if (ret)
4981 goto vops_resume;
4982 }
4983
4984 if (!ufshcd_is_link_active(hba))
4985 ufshcd_setup_clocks(hba, false);
4986 else
4987 /* If link is active, device ref_clk can't be switched off */
4988 __ufshcd_setup_clocks(hba, false, true);
4989
1ab27c9c 4990 hba->clk_gating.state = CLKS_OFF;
57d104c1
SJ
4991 /*
4992 * Disable the host irq as there won't be any host controller
4993 * transaction expected till resume.
4994 */
4995 ufshcd_disable_irq(hba);
4996 /* Put the host controller in low power mode if possible */
4997 ufshcd_hba_vreg_set_lpm(hba);
4998 goto out;
4999
5000vops_resume:
5001 if (hba->vops && hba->vops->resume)
5002 hba->vops->resume(hba, pm_op);
5003set_link_active:
5004 ufshcd_vreg_set_hpm(hba);
5005 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
5006 ufshcd_set_link_active(hba);
5007 else if (ufshcd_is_link_off(hba))
5008 ufshcd_host_reset_and_restore(hba);
5009set_dev_active:
5010 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
5011 ufshcd_disable_auto_bkops(hba);
1ab27c9c
ST
5012enable_gating:
5013 hba->clk_gating.is_suspended = false;
5014 ufshcd_release(hba);
57d104c1
SJ
5015out:
5016 hba->pm_op_in_progress = 0;
5017 return ret;
7a3e97b0
SY
5018}
5019
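/*
 * Illustrative sketch only: ufs_get_pm_lvl_to_dev_pwr_mode() and
 * ufs_get_pm_lvl_to_link_pwr_state() used above index a table of
 * (device power mode, UIC link state) pairs defined earlier in this file.
 * Conceptually it looks roughly like the following (the exact rows are an
 * assumption here):
 *
 *	static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
 *		{UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE},
 *		{UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE},
 *		{UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE},
 *		{UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE},
 *		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
 *		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
 *	};
 *
 * with hba->rpm_lvl or hba->spm_lvl selecting one row, so a single PM level
 * fully determines both the device power mode and the link state requested
 * by ufshcd_suspend().
 */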
5020/**
57d104c1 5021 * ufshcd_resume - helper function for resume operations
3b1d0580 5022 * @hba: per adapter instance
57d104c1 5023 * @pm_op: runtime PM or system PM
7a3e97b0 5024 *
57d104c1
SJ
5025 * This function basically brings the UFS device, UniPro link and controller
5026 * to active state.
5027 *
5028 * Returns 0 for success and non-zero for failure
7a3e97b0 5029 */
57d104c1 5030static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7a3e97b0 5031{
57d104c1
SJ
5032 int ret;
5033 enum uic_link_state old_link_state;
5034
5035 hba->pm_op_in_progress = 1;
5036 old_link_state = hba->uic_link_state;
5037
5038 ufshcd_hba_vreg_set_hpm(hba);
5039 /* Make sure clocks are enabled before accessing controller */
5040 ret = ufshcd_setup_clocks(hba, true);
5041 if (ret)
5042 goto out;
5043
57d104c1
SJ
5044 /* enable the host irq as host controller would be active soon */
5045 ret = ufshcd_enable_irq(hba);
5046 if (ret)
5047 goto disable_irq_and_vops_clks;
5048
5049 ret = ufshcd_vreg_set_hpm(hba);
5050 if (ret)
5051 goto disable_irq_and_vops_clks;
5052
7a3e97b0 5053 /*
57d104c1
SJ
5054 * Call vendor specific resume callback. As these callbacks may access
5055 * vendor specific host controller register space call them when the
5056 * host clocks are ON.
7a3e97b0 5057 */
57d104c1
SJ
5058 if (hba->vops && hba->vops->resume) {
5059 ret = hba->vops->resume(hba, pm_op);
5060 if (ret)
5061 goto disable_vreg;
5062 }
5063
5064 if (ufshcd_is_link_hibern8(hba)) {
5065 ret = ufshcd_uic_hibern8_exit(hba);
5066 if (!ret)
5067 ufshcd_set_link_active(hba);
5068 else
5069 goto vendor_suspend;
5070 } else if (ufshcd_is_link_off(hba)) {
5071 ret = ufshcd_host_reset_and_restore(hba);
5072 /*
5073 * ufshcd_host_reset_and_restore() should have already
5074 * set the link state as active
5075 */
5076 if (ret || !ufshcd_is_link_active(hba))
5077 goto vendor_suspend;
5078 }
5079
5080 if (!ufshcd_is_ufs_dev_active(hba)) {
5081 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
5082 if (ret)
5083 goto set_old_link_state;
5084 }
5085
374a246e
SJ
5086 /*
5087 * If BKOPs operations are urgently needed at this moment then
5088 * keep auto-bkops enabled or else disable it.
5089 */
5090 ufshcd_urgent_bkops(hba);
1ab27c9c
ST
5091 hba->clk_gating.is_suspended = false;
5092
856b3483
ST
5093 if (ufshcd_is_clkscaling_enabled(hba))
5094 devfreq_resume_device(hba->devfreq);
5095
1ab27c9c
ST
5096 /* Schedule clock gating in case of no access to UFS device yet */
5097 ufshcd_release(hba);
57d104c1
SJ
5098 goto out;
5099
5100set_old_link_state:
5101 ufshcd_link_state_transition(hba, old_link_state, 0);
5102vendor_suspend:
5103 if (hba->vops && hba->vops->suspend)
5104 hba->vops->suspend(hba, pm_op);
5105disable_vreg:
5106 ufshcd_vreg_set_lpm(hba);
5107disable_irq_and_vops_clks:
5108 ufshcd_disable_irq(hba);
57d104c1
SJ
5109 ufshcd_setup_clocks(hba, false);
5110out:
5111 hba->pm_op_in_progress = 0;
5112 return ret;
5113}
5114
5115/**
5116 * ufshcd_system_suspend - system suspend routine
5117 * @hba: per adapter instance
5118 * @pm_op: runtime PM or system PM
5119 *
5120 * Check the description of ufshcd_suspend() function for more details.
5121 *
5122 * Returns 0 for success and non-zero for failure
5123 */
5124int ufshcd_system_suspend(struct ufs_hba *hba)
5125{
5126 int ret = 0;
5127
5128 if (!hba || !hba->is_powered)
233b594b 5129 return 0;
57d104c1
SJ
5130
5131 if (pm_runtime_suspended(hba->dev)) {
5132 if (hba->rpm_lvl == hba->spm_lvl)
5133 /*
5134 * There is a possibility that the device may still be in
5135 * active state during the runtime suspend.
5136 */
5137 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
5138 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
5139 goto out;
5140
5141 /*
5142 * UFS device and/or UFS link low power states during runtime
5143 * suspend seem to be different from what is expected during
5144 * system suspend. Hence runtime resume the device & link and
5145 * let the system suspend low power states take effect.
5146 * TODO: If resume takes a long time, we might optimize it in
5147 * the future by not resuming everything if possible.
5148 */
5149 ret = ufshcd_runtime_resume(hba);
5150 if (ret)
5151 goto out;
5152 }
5153
5154 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
5155out:
e785060e
DR
5156 if (!ret)
5157 hba->is_sys_suspended = true;
57d104c1
SJ
5158 return ret;
5159}
5160EXPORT_SYMBOL(ufshcd_system_suspend);
5161
5162/**
5163 * ufshcd_system_resume - system resume routine
5164 * @hba: per adapter instance
5165 *
5166 * Returns 0 for success and non-zero for failure
5167 */
7a3e97b0 5168
57d104c1
SJ
5169int ufshcd_system_resume(struct ufs_hba *hba)
5170{
5171 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
5172 /*
5173 * Let the runtime resume take care of resuming
5174 * if runtime suspended.
5175 */
5176 return 0;
5177
5178 return ufshcd_resume(hba, UFS_SYSTEM_PM);
7a3e97b0 5179}
57d104c1 5180EXPORT_SYMBOL(ufshcd_system_resume);
3b1d0580 5181
57d104c1
SJ
5182/**
5183 * ufshcd_runtime_suspend - runtime suspend routine
5184 * @hba: per adapter instance
5185 *
5186 * Check the description of ufshcd_suspend() function for more details.
5187 *
5188 * Returns 0 for success and non-zero for failure
5189 */
66ec6d59
SRT
5190int ufshcd_runtime_suspend(struct ufs_hba *hba)
5191{
57d104c1 5192 if (!hba || !hba->is_powered)
66ec6d59
SRT
5193 return 0;
5194
57d104c1 5195 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
66ec6d59
SRT
5196}
5197EXPORT_SYMBOL(ufshcd_runtime_suspend);
5198
57d104c1
SJ
5199/**
5200 * ufshcd_runtime_resume - runtime resume routine
5201 * @hba: per adapter instance
5202 *
5203 * This function basically brings the UFS device, UniPro link and controller
5204 * to active state. Following operations are done in this function:
5205 *
5206 * 1. Turn on all the controller related clocks
5207 * 2. Bring the UniPro link out of Hibernate state
5208 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
5209 * to active state.
5210 * 4. If auto-bkops is enabled on the device, disable it.
5211 *
5212 * So following would be the possible power state after this function returns
5213 * successfully:
5214 * S1: UFS device in Active state with VCC rail ON
5215 * UniPro link in Active state
5216 * All the UFS/UniPro controller clocks are ON
5217 *
5218 * Returns 0 for success and non-zero for failure
5219 */
66ec6d59
SRT
5220int ufshcd_runtime_resume(struct ufs_hba *hba)
5221{
57d104c1 5222 if (!hba || !hba->is_powered)
66ec6d59 5223 return 0;
57d104c1
SJ
5224 else
5225 return ufshcd_resume(hba, UFS_RUNTIME_PM);
66ec6d59
SRT
5226}
5227EXPORT_SYMBOL(ufshcd_runtime_resume);
5228
5229int ufshcd_runtime_idle(struct ufs_hba *hba)
5230{
5231 return 0;
5232}
5233EXPORT_SYMBOL(ufshcd_runtime_idle);
5234
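/*
 * Illustrative sketch only (hypothetical glue code): the exported
 * suspend/resume helpers above are meant to be wired into dev_pm_ops by a
 * bus glue driver such as ufshcd-pltfrm or ufshcd-pci, roughly:
 *
 *	static int my_ufs_suspend(struct device *dev)
 *	{
 *		return ufshcd_system_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int my_ufs_resume(struct device *dev)
 *	{
 *		return ufshcd_system_resume(dev_get_drvdata(dev));
 *	}
 *
 *	static const struct dev_pm_ops my_ufs_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(my_ufs_suspend, my_ufs_resume)
 *		SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend,
 *				   my_ufs_runtime_resume,
 *				   my_ufs_runtime_idle)
 *	};
 *
 * The my_ufs_* names are hypothetical; the runtime wrappers are analogous
 * to the system sleep ones and simply call ufshcd_runtime_suspend(),
 * ufshcd_runtime_resume() and ufshcd_runtime_idle() on the drvdata.
 */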
57d104c1
SJ
5235/**
5236 * ufshcd_shutdown - shutdown routine
5237 * @hba: per adapter instance
5238 *
5239 * This function would power off both UFS device and UFS link.
5240 *
5241 * Returns 0 always to allow force shutdown even in case of errors.
5242 */
5243int ufshcd_shutdown(struct ufs_hba *hba)
5244{
5245 int ret = 0;
5246
5247 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
5248 goto out;
5249
5250 if (pm_runtime_suspended(hba->dev)) {
5251 ret = ufshcd_runtime_resume(hba);
5252 if (ret)
5253 goto out;
5254 }
5255
5256 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
5257out:
5258 if (ret)
5259 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
5260 /* allow force shutdown even in case of errors */
5261 return 0;
5262}
5263EXPORT_SYMBOL(ufshcd_shutdown);
5264
7a3e97b0 5265/**
3b1d0580 5266 * ufshcd_remove - de-allocate SCSI host and host memory space
7a3e97b0 5267 * data structure memory
3b1d0580 5268 * @hba - per adapter instance
7a3e97b0 5269 */
3b1d0580 5270void ufshcd_remove(struct ufs_hba *hba)
7a3e97b0 5271{
cfdf9c91 5272 scsi_remove_host(hba->host);
7a3e97b0 5273 /* disable interrupts */
2fbd009b 5274 ufshcd_disable_intr(hba, hba->intr_mask);
7a3e97b0 5275 ufshcd_hba_stop(hba);
7a3e97b0 5276
7a3e97b0 5277 scsi_host_put(hba->host);
5c0c28a8 5278
1ab27c9c 5279 ufshcd_exit_clk_gating(hba);
856b3483
ST
5280 if (ufshcd_is_clkscaling_enabled(hba))
5281 devfreq_remove_device(hba->devfreq);
aa497613 5282 ufshcd_hba_exit(hba);
3b1d0580
VH
5283}
5284EXPORT_SYMBOL_GPL(ufshcd_remove);
5285
ca3d7bf9
AM
5286/**
5287 * ufshcd_set_dma_mask - Set dma mask based on the controller
5288 * addressing capability
5289 * @hba: per adapter instance
5290 *
5291 * Returns 0 for success, non-zero for failure
5292 */
5293static int ufshcd_set_dma_mask(struct ufs_hba *hba)
5294{
5295 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
5296 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
5297 return 0;
5298 }
5299 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
5300}
5301
7a3e97b0 5302/**
5c0c28a8 5303 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3b1d0580
VH
5304 * @dev: pointer to device handle
5305 * @hba_handle: driver private handle
7a3e97b0
SY
5306 * Returns 0 on success, non-zero value on failure
5307 */
5c0c28a8 5308int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7a3e97b0
SY
5309{
5310 struct Scsi_Host *host;
5311 struct ufs_hba *hba;
5c0c28a8 5312 int err = 0;
7a3e97b0 5313
3b1d0580
VH
5314 if (!dev) {
5315 dev_err(dev,
5316 "Invalid memory reference for dev is NULL\n");
5317 err = -ENODEV;
7a3e97b0
SY
5318 goto out_error;
5319 }
5320
7a3e97b0
SY
5321 host = scsi_host_alloc(&ufshcd_driver_template,
5322 sizeof(struct ufs_hba));
5323 if (!host) {
3b1d0580 5324 dev_err(dev, "scsi_host_alloc failed\n");
7a3e97b0 5325 err = -ENOMEM;
3b1d0580 5326 goto out_error;
7a3e97b0
SY
5327 }
5328 hba = shost_priv(host);
7a3e97b0 5329 hba->host = host;
3b1d0580 5330 hba->dev = dev;
5c0c28a8
SRT
5331 *hba_handle = hba;
5332
5333out_error:
5334 return err;
5335}
5336EXPORT_SYMBOL(ufshcd_alloc_host);
5337
856b3483
ST
5338static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
5339{
5340 int ret = 0;
5341 struct ufs_clk_info *clki;
5342 struct list_head *head = &hba->clk_list_head;
5343
5344 if (!head || list_empty(head))
5345 goto out;
5346
5347 list_for_each_entry(clki, head, list) {
5348 if (!IS_ERR_OR_NULL(clki->clk)) {
5349 if (scale_up && clki->max_freq) {
5350 if (clki->curr_freq == clki->max_freq)
5351 continue;
5352 ret = clk_set_rate(clki->clk, clki->max_freq);
5353 if (ret) {
5354 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5355 __func__, clki->name,
5356 clki->max_freq, ret);
5357 break;
5358 }
5359 clki->curr_freq = clki->max_freq;
5360
5361 } else if (!scale_up && clki->min_freq) {
5362 if (clki->curr_freq == clki->min_freq)
5363 continue;
5364 ret = clk_set_rate(clki->clk, clki->min_freq);
5365 if (ret) {
5366 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5367 __func__, clki->name,
5368 clki->min_freq, ret);
5369 break;
5370 }
5371 clki->curr_freq = clki->min_freq;
5372 }
5373 }
5374 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
5375 clki->name, clk_get_rate(clki->clk));
5376 }
5377 if (hba->vops && hba->vops->clk_scale_notify)
5378 hba->vops->clk_scale_notify(hba);
5379out:
5380 return ret;
5381}
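/*
 * ufshcd_scale_clks() walks hba->clk_list_head, which the platform glue
 * (e.g. ufshcd-pltfrm parsing clock data from the device tree) is expected to
 * have populated with struct ufs_clk_info entries. A hedged sketch of how one
 * such entry could be added; the clock name and frequencies are illustrative
 * only, and devm_kzalloc()/devm_clk_get()/list_add_tail() come from
 * <linux/slab.h>, <linux/clk.h> and <linux/list.h>:
 */
static int example_add_clk(struct ufs_hba *hba, const char *name,
			   u32 min_freq, u32 max_freq)
{
	struct ufs_clk_info *clki;

	clki = devm_kzalloc(hba->dev, sizeof(*clki), GFP_KERNEL);
	if (!clki)
		return -ENOMEM;
	clki->name = name;
	clki->min_freq = min_freq;	/* Hz, target when scaling down */
	clki->max_freq = max_freq;	/* Hz, target when scaling up */
	clki->clk = devm_clk_get(hba->dev, name);
	if (IS_ERR(clki->clk))
		return PTR_ERR(clki->clk);
	list_add_tail(&clki->list, &hba->clk_list_head);
	return 0;
}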
5382
5383static int ufshcd_devfreq_target(struct device *dev,
5384 unsigned long *freq, u32 flags)
5385{
5386 int err = 0;
5387 struct ufs_hba *hba = dev_get_drvdata(dev);
5388
5389 if (!ufshcd_is_clkscaling_enabled(hba))
5390 return -EINVAL;
5391
5392 if (*freq == UINT_MAX)
5393 err = ufshcd_scale_clks(hba, true);
5394 else if (*freq == 0)
5395 err = ufshcd_scale_clks(hba, false);
5396
5397 return err;
5398}
5399
5400static int ufshcd_devfreq_get_dev_status(struct device *dev,
5401 struct devfreq_dev_status *stat)
5402{
5403 struct ufs_hba *hba = dev_get_drvdata(dev);
5404 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
5405 unsigned long flags;
5406
5407 if (!ufshcd_is_clkscaling_enabled(hba))
5408 return -EINVAL;
5409
5410 memset(stat, 0, sizeof(*stat));
5411
5412 spin_lock_irqsave(hba->host->host_lock, flags);
5413 if (!scaling->window_start_t)
5414 goto start_window;
5415
5416 if (scaling->is_busy_started)
5417 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
5418 scaling->busy_start_t));
5419
5420 stat->total_time = jiffies_to_usecs((long)jiffies -
5421 (long)scaling->window_start_t);
5422 stat->busy_time = scaling->tot_busy_t;
5423start_window:
5424 scaling->window_start_t = jiffies;
5425 scaling->tot_busy_t = 0;
5426
5427 if (hba->outstanding_reqs) {
5428 scaling->busy_start_t = ktime_get();
5429 scaling->is_busy_started = true;
5430 } else {
5431 scaling->busy_start_t = ktime_set(0, 0);
5432 scaling->is_busy_started = false;
5433 }
5434 spin_unlock_irqrestore(hba->host->host_lock, flags);
5435 return 0;
5436}
5437
5438static struct devfreq_dev_profile ufs_devfreq_profile = {
5439 .polling_ms = 100,
5440 .target = ufshcd_devfreq_target,
5441 .get_dev_status = ufshcd_devfreq_get_dev_status,
5442};
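/*
 * How the pieces above fit together (a hedged sketch, not devfreq's actual
 * internals): roughly every polling_ms (100 ms) the devfreq core calls
 * ufshcd_devfreq_get_dev_status(), derives a load figure from busy_time and
 * total_time, and then asks ufshcd_devfreq_target() for a new frequency. This
 * driver only reacts to the two extreme requests, UINT_MAX (scale all UFS
 * clocks up) and 0 (scale them down), and ignores anything in between. The
 * numbers and the 90% threshold below are illustrative assumptions:
 */
static bool example_should_scale_up(const struct devfreq_dev_status *stat)
{
	/* e.g. 60000 us busy out of a 100000 us window -> 60% load */
	unsigned long load = stat->total_time ?
		(100UL * stat->busy_time) / stat->total_time : 0;

	/* simple_ondemand's default upthreshold is around 90% (assumption) */
	return load > 90;
}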
5443
5c0c28a8
SRT
5444/**
5445 * ufshcd_init - Driver initialization routine
5446 * @hba: per-adapter instance
5447 * @mmio_base: base register address
5448 * @irq: Interrupt line of device
5449 * Returns 0 on success, non-zero value on failure
5450 */
5451int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5452{
5453 int err;
5454 struct Scsi_Host *host = hba->host;
5455 struct device *dev = hba->dev;
5456
5457 if (!mmio_base) {
5458 dev_err(hba->dev,
5459 "Invalid memory reference for mmio_base is NULL\n");
5460 err = -ENODEV;
5461 goto out_error;
5462 }
5463
3b1d0580
VH
5464 hba->mmio_base = mmio_base;
5465 hba->irq = irq;
7a3e97b0 5466
aa497613 5467 err = ufshcd_hba_init(hba);
5c0c28a8
SRT
5468 if (err)
5469 goto out_error;
5470
7a3e97b0
SY
5471 /* Read capabilities registers */
5472 ufshcd_hba_capabilities(hba);
5473
5474 /* Get UFS version supported by the controller */
5475 hba->ufs_version = ufshcd_get_ufs_version(hba);
5476
2fbd009b
SJ
5477 /* Get Interrupt bit mask per version */
5478 hba->intr_mask = ufshcd_get_intr_mask(hba);
5479
ca3d7bf9
AM
5480 err = ufshcd_set_dma_mask(hba);
5481 if (err) {
5482 dev_err(hba->dev, "set dma mask failed\n");
5483 goto out_disable;
5484 }
5485
7a3e97b0
SY
5486 /* Allocate memory for host memory space */
5487 err = ufshcd_memory_alloc(hba);
5488 if (err) {
3b1d0580
VH
5489 dev_err(hba->dev, "Memory allocation failed\n");
5490 goto out_disable;
7a3e97b0
SY
5491 }
5492
5493 /* Configure LRB */
5494 ufshcd_host_memory_configure(hba);
5495
5496 host->can_queue = hba->nutrs;
5497 host->cmd_per_lun = hba->nutrs;
5498 host->max_id = UFSHCD_MAX_ID;
0ce147d4 5499 host->max_lun = UFS_MAX_LUNS;
7a3e97b0
SY
5500 host->max_channel = UFSHCD_MAX_CHANNEL;
5501 host->unique_id = host->host_no;
5502 host->max_cmd_len = MAX_CDB_SIZE;
5503
7eb584db
DR
5504 hba->max_pwr_info.is_valid = false;
5505
7a3e97b0 5506 /* Initialize wait queue for task management */
e2933132
SRT
5507 init_waitqueue_head(&hba->tm_wq);
5508 init_waitqueue_head(&hba->tm_tag_wq);
7a3e97b0
SY
5509
5510 /* Initialize work queues */
e8e7f271 5511 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
66ec6d59 5512 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7a3e97b0 5513
6ccf44fe
SJ
5514 /* Initialize UIC command mutex */
5515 mutex_init(&hba->uic_cmd_mutex);
5516
5a0b0cb9
SRT
5517 /* Initialize mutex for device management commands */
5518 mutex_init(&hba->dev_cmd.lock);
5519
5520 /* Initialize device management tag acquire wait queue */
5521 init_waitqueue_head(&hba->dev_cmd.tag_wq);
5522
1ab27c9c 5523 ufshcd_init_clk_gating(hba);
7a3e97b0 5524 /* IRQ registration */
2953f850 5525 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7a3e97b0 5526 if (err) {
3b1d0580 5527 dev_err(hba->dev, "request irq failed\n");
1ab27c9c 5528 goto exit_gating;
57d104c1
SJ
5529 } else {
5530 hba->is_irq_enabled = true;
7a3e97b0
SY
5531 }
5532
5533 /* Enable SCSI tag mapping */
5534 err = scsi_init_shared_tag_map(host, host->can_queue);
5535 if (err) {
3b1d0580 5536 dev_err(hba->dev, "init shared queue failed\n");
1ab27c9c 5537 goto exit_gating;
7a3e97b0
SY
5538 }
5539
3b1d0580 5540 err = scsi_add_host(host, hba->dev);
7a3e97b0 5541 if (err) {
3b1d0580 5542 dev_err(hba->dev, "scsi_add_host failed\n");
1ab27c9c 5543 goto exit_gating;
7a3e97b0
SY
5544 }
5545
6ccf44fe
SJ
5546 /* Host controller enable */
5547 err = ufshcd_hba_enable(hba);
7a3e97b0 5548 if (err) {
6ccf44fe 5549 dev_err(hba->dev, "Host controller enable failed\n");
3b1d0580 5550 goto out_remove_scsi_host;
7a3e97b0 5551 }
6ccf44fe 5552
856b3483
ST
5553 if (ufshcd_is_clkscaling_enabled(hba)) {
5554 hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
5555 "simple_ondemand", NULL);
5556 if (IS_ERR(hba->devfreq)) {
5557 dev_err(hba->dev, "Unable to register with devfreq %ld\n",
5558 PTR_ERR(hba->devfreq));
5559 goto out_remove_scsi_host;
5560 }
5561 /* Suspend devfreq until the UFS device is detected */
5562 devfreq_suspend_device(hba->devfreq);
5563 hba->clk_scaling.window_start_t = 0;
5564 }
5565
62694735
SRT
5566 /* Hold auto suspend until async scan completes */
5567 pm_runtime_get_sync(dev);
5568
57d104c1
SJ
5569 /*
5570 * The device-initialize-sequence hasn't been invoked yet.
5571 * Set the device to power-off state
5572 */
5573 ufshcd_set_ufs_dev_poweroff(hba);
5574
6ccf44fe
SJ
5575 async_schedule(ufshcd_async_scan, hba);
5576
7a3e97b0
SY
5577 return 0;
5578
3b1d0580
VH
5579out_remove_scsi_host:
5580 scsi_remove_host(hba->host);
1ab27c9c
ST
5581exit_gating:
5582 ufshcd_exit_clk_gating(hba);
3b1d0580 5583out_disable:
57d104c1 5584 hba->is_irq_enabled = false;
3b1d0580 5585 scsi_host_put(host);
aa497613 5586 ufshcd_hba_exit(hba);
3b1d0580
VH
5587out_error:
5588 return err;
5589}
5590EXPORT_SYMBOL_GPL(ufshcd_init);
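/*
 * A condensed sketch (the ufs_example_* names are hypothetical) of how a glue
 * driver such as ufshcd-pltfrm or ufshcd-pci is expected to drive the two
 * exported entry points: allocate the HBA first, fill in any variant data
 * (hba->vops, clock and regulator descriptions), then hand mmio_base and irq
 * to ufshcd_init(), which does everything else up to scheduling the async
 * device scan.
 */
#include <linux/platform_device.h>
#include "ufshcd.h"

static int ufs_example_pltfrm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *mmio_base;
	struct ufs_hba *hba;
	int irq, err;

	mmio_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(dev, &hba);
	if (err)
		return err;

	/* variant hooks, clock and regulator descriptions would be set up here */

	err = ufshcd_init(hba, mmio_base, irq);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return 0;
}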
5591
3b1d0580
VH
 5592MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
5593MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
e0eca63e 5594MODULE_DESCRIPTION("Generic UFS host controller driver Core");
7a3e97b0
SY
5595MODULE_LICENSE("GPL");
5596MODULE_VERSION(UFSHCD_DRIVER_VERSION);