/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 */

#include <linux/async.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UIC_POWER_MODE |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

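/*
 * Illustrative use (a sketch, not from the original source): poll the
 * transfer request doorbell until the bit for a given tag reads back as
 * zero, sampling every 1000us for at most 1000ms. ufshcd_clear_cmd()
 * further below performs essentially this wait:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, 0, 1000, 1000);
 */
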
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @reg_hcs - host controller status register value
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(u32 reg_hcs)
{
	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return task_req_descp->header.dword_2 & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 *
 * Returns maximum number of task management request slots in case of
 * task management queue full or returns the free slot number
 */
static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
{
	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
}

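/*
 * Illustrative caller check (a sketch, not from the original source):
 * find_first_zero_bit() returns hba->nutmrs when every slot is taken,
 * so a caller detects a full task management queue with:
 *
 *	int free_slot = ufshcd_get_tm_free_slot(hba);
 *
 *	if (free_slot >= hba->nutmrs)
 *		the queue is full - wait or fail the request
 */
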
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 */
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

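/*
 * Worked example (illustrative, not from the original source): with
 * UTRLRDY, UTMRLRDY and UCRDY all set and HEI/DEI clear, e.g.
 * reg = 0x0F (Device Present | UTRLRDY | UTMRLRDY | UCRDY):
 *
 *	((0x0F & 0xFF) >> 1) ^ 0x07  ==  0x07 ^ 0x07  ==  0
 *
 * Any missing ready bit or a set error bit yields a non-zero result.
 */
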
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns the value of UIC command argument3
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, it indicates to the
 * host controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
 * @response: upiu query response to convert
 */
static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
{
	response->length = be16_to_cpu(response->length);
	response->value = be32_to_cpu(response->value);
}

/**
 * ufshcd_query_to_be() - formats the buffer to big endian
 * @request: upiu query request to convert
 */
static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
{
	request->length = cpu_to_be16(request->length);
	request->value = cpu_to_be32(request->value);
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static
void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
	ufshcd_query_to_cpu(&query_res->upiu_res);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 len;

		/* data segment length */
		len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
				MASK_QUERY_DATA_SEG_LEN;

		memcpy(hba->dev_cmd.query.descriptor, descp,
			min_t(u16, len, QUERY_DESC_MAX_SIZE));
	}
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

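/*
 * Worked example (illustrative, not from the original source; assumes
 * the usual UFSHCI field layout where NUTRS occupies the low bits of
 * the capabilities register and NUTMRS sits at bits 18:16): since both
 * fields are 0-based, a capabilities word whose NUTRS field reads 0x1F
 * and whose NUTMRS field reads 0x7 decodes as:
 *
 *	hba->nutrs  = 0x1F + 1 = 32 transfer request slots
 *	hba->nutmrs = 0x7  + 1 =  8 task management slots
 */
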
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *			      to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with the mutex held.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	return ret;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;

	mutex_lock(&hba->uic_cmd_mutex);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}

/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp: pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			/* the PRD size field holds byte count - 1 */
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

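/*
 * Worked example (illustrative, not from the original source): in the
 * UFSHCI v1.0 path of ufshcd_enable_intr(), ((set ^ intrs) & intrs)
 * keeps exactly those requested bits that are currently 0 in `set`.
 * E.g. with set = 0b0101 and intrs = 0b0011:
 *
 *	(0b0101 ^ 0b0011) & 0b0011  ==  0b0110 & 0b0011  ==  0b0010
 *
 * i.e. only the not-yet-enabled requested bit is written, while the
 * already-set read/write bits are carried over via `rw`.
 */
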
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);

	/*
	 * Assign an invalid value to the command status. The controller
	 * updates OCS on command completion with the actual status.
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = query->request.upiu_req.length;
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);
	ufshcd_query_to_be(&ucd_req_ptr->qr);

	/* Copy the Descriptor */
	if ((len > 0) && (query->request.upiu_req.opcode ==
					UPIU_QUERY_OPCODE_WRITE_DESC)) {
		memcpy(descp, query->descriptor,
				min_t(u16, len, QUERY_DESC_MAX_SIZE));
	}
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
			ret = -EINVAL;
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufshcd_prepare_utp_nop_upiu(lrbp);
		else
			ret = -EINVAL;
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
			__func__);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
			__func__, lrbp->command_type);
		break;
	} /* end of switch */

	return ret;
}

/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find a different tag instead of waiting for dev manage
		 * command completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;
	lrbp->intr_cmd = false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	lrbp->cmd = NULL;
	lrbp->sense_bufflen = 0;
	lrbp->sense_buffer = NULL;
	lrbp->task_tag = tag;
	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;

	return ufshcd_compose_upiu(hba, lrbp);
}

static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * returns true with tag value in @tag_out.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request;
	struct ufs_query_res *response;
	int err;

	BUG_ON(!hba);

	mutex_lock(&hba->dev_cmd.lock);
	request = &hba->dev_cmd.query.request;
	response = &hba->dev_cmd.query.response;
	memset(request, 0, sizeof(struct ufs_query_req));
	memset(response, 0, sizeof(struct ufs_query_res));

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}
	request->upiu_req.opcode = opcode;
	request->upiu_req.idn = idn;

	/* Send query request */
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
			QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (response->upiu_res.value &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	return err;
}

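/*
 * Illustrative use (a sketch, not from the original source):
 * ufshcd_complete_dev_init() further below sets and then polls the
 * fDeviceInit flag with calls of this shape:
 *
 *	bool flag_res;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */
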
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request;
	struct ufs_query_res *response;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	request = &hba->dev_cmd.query.request;
	response = &hba->dev_cmd.query.response;
	memset(request, 0, sizeof(struct ufs_query_req));
	memset(response, 0, sizeof(struct ufs_query_res));

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = *attr_val;
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	request->upiu_req.opcode = opcode;
	request->upiu_req.idn = idn;
	request->upiu_req.index = index;
	request->upiu_req.selector = selector;

	/* Send query request */
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
			QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = response->upiu_res.value;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	return err;
}

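/*
 * Illustrative use (a sketch, not from the original source; the
 * QUERY_ATTR_IDN_BKOPS_STATUS idn is assumed to be defined in ufs.h):
 * read the device's background operations status attribute:
 *
 *	u32 bkops_status = 0;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *				QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0,
 *				&bkops_status);
 */
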
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
			cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
			cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);

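/*
 * Illustrative use (a sketch, not from the original source; ATTR_SET_NOR
 * and DME_LOCAL are assumed to come from unipro.h/ufshcd.h): set the
 * local PA_PWRMODE attribute with a NORMAL attribute-set type:
 *
 *	ret = ufshcd_dme_set_attr(hba, UIC_ARG_MIB(PA_PWRMODE),
 *				  ATTR_SET_NOR, mode, DME_LOCAL);
 */
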
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
			get, UIC_GET_ATTR_ID(attr_sel), ret);
		goto out;
	}

	if (mib_val)
		*mib_val = uic_cmd.argument3;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	struct completion pwr_done;
	unsigned long flags;
	u8 status;
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	init_completion(&pwr_done);

	mutex_lock(&hba->uic_cmd_mutex);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = &pwr_done;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x uic error %d\n",
			mode, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->pwr_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x completion timeout\n",
			mode);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr mode change failed, host upmcrs:0x%x\n",
			status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);
	return ret;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i, retries, err = 0;
	bool flag_res = 1;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* Set the fDeviceInit flag */
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
		if (!err || err == -ETIMEDOUT)
			break;
		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 100 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 100 && !err && flag_res; i++) {
		for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
			err = ufshcd_query_flag(hba,
					UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
			if (!err || err == -ETIMEDOUT)
				break;
			dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
					err);
		}
	}
	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Enable required interrupts
 * 3. Configure interrupt aggregation
 * 4. Program UTRL and UTMRL base addresses
 * 5. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* check if device present */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!ufshcd_is_device_present(reg)) {
		dev_err(hba->dev, "cc: Device not present\n");
		err = -ENXIO;
		goto out;
	}

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		scsi_unblock_requests(hba->host);

	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

out:
	return err;
}

/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {

		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller, the HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}
	return 0;
}

/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);

	ret = ufshcd_dme_link_startup(hba);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);

out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}

/**
 * ufshcd_do_reset - reset the host controller
 * @hba: per adapter instance
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_do_reset(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	unsigned long flags;
	int tag;

	/* block commands from midlayer */
	scsi_block_requests(hba->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;

	/* send controller to reset state */
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* abort outstanding commands */
	for (tag = 0; tag < hba->nutrs; tag++) {
		if (test_bit(tag, &hba->outstanding_reqs)) {
			lrbp = &hba->lrb[tag];
			if (lrbp->cmd) {
				scsi_dma_unmap(lrbp->cmd);
				lrbp->cmd->result = DID_RESET << 16;
				lrbp->cmd->scsi_done(lrbp->cmd);
				lrbp->cmd = NULL;
				clear_bit_unlock(tag, &hba->lrb_in_use);
			}
		}
	}

	/* complete device management command */
	if (hba->dev_cmd.complete)
		complete(hba->dev_cmd.complete);

	/* clear outstanding request/task bit maps */
	hba->outstanding_reqs = 0;
	hba->outstanding_tasks = 0;

	/* Host controller enable */
	if (ufshcd_hba_enable(hba)) {
		dev_err(hba->dev,
			"Reset: Controller initialization failed\n");
		return FAILED;
	}

	if (ufshcd_link_startup(hba)) {
		dev_err(hba->dev,
			"Reset: Link start-up failed\n");
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Always returns 0 (success)
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	sdev->tagged_supported = 1;

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;
	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);

	/*
	 * Inform the SCSI midlayer that the LUN queue depth is the same as
	 * the controller queue depth. If a LUN queue depth is less than the
	 * controller queue depth and the LUN reports
	 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
	 * with scsi_adjust_queue_depth().
	 */
	scsi_activate_tcq(sdev, hba->nutrs);
	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	scsi_deactivate_tcq(sdev, hba->nutrs);
}

/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);

		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
			task_result = FAILED;
		else
			task_result = SUCCESS;
	} else {
		task_result = FAILED;
		dev_err(hba->dev,
			"trc: Invalid ocs = %x\n", ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return task_result;
}

/**
 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
 * SAM_STAT_TASK_SET_FULL SCSI command status.
 * @cmd: pointer to SCSI command
 */
static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba;
	int i;
	int lun_qdepth = 0;

	hba = shost_priv(cmd->device->host);

	/*
	 * LUN queue depth can be obtained by counting outstanding commands
	 * on the LUN.
	 */
	for (i = 0; i < hba->nutrs; i++) {
		if (test_bit(i, &hba->outstanding_reqs)) {

			/*
			 * Check if the outstanding command belongs
			 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
			 */
			if (cmd->device->lun == hba->lrb[i].lun)
				lun_qdepth++;
		}
	}

	/*
	 * LUN queue depth will be total outstanding commands, except the
	 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
	 */
	scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
}

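/*
 * Worked example for the adjustment above: if five commands are
 * outstanding on the LUN, including the one that just came back with
 * SAM_STAT_TASK_SET_FULL, lun_qdepth counts all five and the queue
 * depth is trimmed to four, i.e. the number of commands the LUN proved
 * it can actually hold at once.
 */
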
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
		/*
		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
		 * depth needs to be adjusted to the exact number of
		 * outstanding commands the LUN can handle at any given time.
		 */
		ufshcd_adjust_lun_qdepth(lrbp->cmd);
		/* fallthrough */
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}

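/*
 * For reference, the 32-bit SCSI result word assembled above follows the
 * midlayer convention: driver byte (bits 24-31), host byte (bits 16-23),
 * message byte (bits 8-15) and status byte (bits 0-7). E.g. a command
 * completing cleanly yields
 *
 *	DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD
 *
 * while the host-byte-only errors used elsewhere in this file
 * (DID_ERROR << 16, DID_ABORT << 16) leave the status byte zero.
 */
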
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			/* report the response code before overwriting it */
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 */
static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
		hba->active_uic_cmd->argument2 |=
			ufshcd_get_uic_cmd_result(hba);
		hba->active_uic_cmd->argument3 =
			ufshcd_get_dme_attr_val(hba);
		complete(&hba->active_uic_cmd->done);
	}

	if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
		complete(hba->pwr_done);
}

/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long completed_reqs;
	u32 tr_doorbell;
	int result;
	int index;
	bool int_aggr_reset = false;

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	for (index = 0; index < hba->nutrs; index++) {
		if (test_bit(index, &completed_reqs)) {
			lrbp = &hba->lrb[index];
			cmd = lrbp->cmd;
			/*
			 * Don't skip resetting interrupt aggregation counters
			 * if a regular command is present.
			 */
			int_aggr_reset |= !lrbp->intr_cmd;

			if (cmd) {
				result = ufshcd_transfer_rsp_status(hba, lrbp);
				scsi_dma_unmap(cmd);
				cmd->result = result;
				/* Mark completed command as NULL in LRB */
				lrbp->cmd = NULL;
				clear_bit_unlock(index, &hba->lrb_in_use);
				/* Do not touch lrbp after scsi done */
				cmd->scsi_done(cmd);
			} else if (lrbp->command_type ==
					UTP_CMD_TYPE_DEV_MANAGE) {
				if (hba->dev_cmd.complete)
					complete(hba->dev_cmd.complete);
			}
		} /* end of if */
	} /* end of for */

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	/* we might have freed some tags above */
	wake_up(&hba->dev_cmd.tag_wq);

	/* Reset interrupt aggregation counters */
	if (int_aggr_reset)
		ufshcd_reset_intr_aggr(hba);
}

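/*
 * Illustration of the doorbell XOR above: the controller clears a
 * request's doorbell bit once it completes, so XOR-ing the live
 * doorbell with our bookkeeping isolates the finished slots, e.g.
 *
 *	outstanding_reqs = 0b1011	(tags 0, 1 and 3 issued)
 *	tr_doorbell	 = 0b0010	(only tag 1 still running)
 *	completed_reqs	 = 0b1001	(tags 0 and 3 are done)
 *
 * and the closing "outstanding_reqs ^= completed_reqs" leaves 0b0010.
 */
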
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}

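/*
 * Example of the mask bookkeeping in the two helpers above, assuming
 * MASK_EE_URGENT_BKOPS is bit 2 of wExceptionEventControl (per ufs.h):
 * with ee_ctrl_mask == 0x0000, ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS)
 * writes 0x0004 to the attribute and caches it; a later
 * ufshcd_disable_ee() with the same mask writes 0x0000 back. Both
 * helpers are no-ops when the cached mask already matches the request.
 */
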
/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback that the device may move into a critical state in
 * which it is no longer operable. Make sure to call
 * ufshcd_enable_auto_bkops() whenever the host is idle so that BKOPS
 * are managed effectively without any negative impact.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to its default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	hba->auto_bkops_enabled = false;
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	int err;
	u32 status = 0;

	err = ufshcd_get_bkops_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	status = status & 0xF;

	/* handle only if status indicates performance impact or critical */
	if (status >= BKOPS_STATUS_PERF_IMPACT)
		err = ufshcd_enable_auto_bkops(hba);
out:
	return err;
}

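/*
 * For context (levels assumed from the bBackgroundOpStatus attribute
 * definition in the UFS spec / ufs.h): 0 = no operations required,
 * 1 = operations outstanding but non-critical, 2 = performance impact,
 * 3 = critical. The check above therefore re-enables auto-BKOPS only
 * once the device reports level 2 or worse.
 */
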
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;
	if (status & MASK_EE_URGENT_BKOPS) {
		err = ufshcd_urgent_bkops(hba);
		if (err)
			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
					__func__, err);
	}
out:
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to work structure embedded in the adapter instance
 */
static void ufshcd_fatal_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(work, struct ufs_hba, feh_workq);

	pm_runtime_get_sync(hba->dev);
	/* check if reset is already in progress */
	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
		ufshcd_do_reset(hba);
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufshcd_err_handler - check for fatal errors
 * @hba: per adapter instance
 */
static void ufshcd_err_handler(struct ufs_hba *hba)
{
	u32 reg;

	if (hba->errors & INT_FATAL_ERRORS)
		goto fatal_eh;

	if (hba->errors & UIC_ERROR) {
		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			goto fatal_eh;
	}
	return;
fatal_eh:
	hba->ufshcd_state = UFSHCD_STATE_ERROR;
	schedule_work(&hba->feh_workq);
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_err_handler(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *	   IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		ufshcd_sl_intr(hba, intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}

/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @tm_function: task management function opcode
 *
 * Returns SUCCESS/FAILED
 */
static int
ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		    struct ufshcd_lrb *lrbp,
		    u8 tm_function)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot = 0;
	int err;

	host = hba->host;

	spin_lock_irqsave(host->host_lock, flags);

	/* If task management queue is full */
	free_slot = ufshcd_get_tm_free_slot(hba);
	if (free_slot >= hba->nutmrs) {
		spin_unlock_irqrestore(host->host_lock, flags);
		dev_err(hba->dev, "Task management queue full\n");
		err = FAILED;
		goto out;
	}

	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
				  lrbp->lun, lrbp->task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);

	task_req_upiup->input_param1 = lrbp->lun;
	task_req_upiup->input_param1 =
		cpu_to_be32(task_req_upiup->input_param1);
	task_req_upiup->input_param2 = lrbp->task_tag;
	task_req_upiup->input_param2 =
		cpu_to_be32(task_req_upiup->input_param2);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);
	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
					       (test_bit(free_slot,
					       &hba->tm_condition) != 0),
					       60 * HZ);
	if (!err) {
		dev_err(hba->dev,
			"Task management command timed-out\n");
		err = FAILED;
		goto out;
	}
	clear_bit(free_slot, &hba->tm_condition);
	err = ufshcd_task_req_compl(hba, free_slot);
out:
	return err;
}

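/*
 * Illustration of the UPIU header packing above, assuming the
 * UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0) macro in ufs.h packs
 * its arguments MSB-first into a big-endian dword:
 *
 *	UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, lun, task_tag)
 *
 * places the transaction code in byte 3, the flags in byte 2, the LUN
 * in byte 1 and the task tag in byte 0, matching the wire layout of a
 * task management request UPIU.
 */
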
/**
 * ufshcd_device_reset - reset device and abort all the pending commands
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
	if (err == FAILED)
		goto out;

	for (pos = 0; pos < hba->nutrs; pos++) {
		if (test_bit(pos, &hba->outstanding_reqs) &&
		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {

			/* clear the respective UTRLCLR register bit */
			ufshcd_utrl_clear(hba, pos);

			clear_bit(pos, &hba->outstanding_reqs);

			if (hba->lrb[pos].cmd) {
				scsi_dma_unmap(hba->lrb[pos].cmd);
				hba->lrb[pos].cmd->result =
					DID_ABORT << 16;
				/* complete the aborted command, not @cmd */
				hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
				hba->lrb[pos].cmd = NULL;
				clear_bit_unlock(pos, &hba->lrb_in_use);
				wake_up(&hba->dev_cmd.tag_wq);
			}
		}
	} /* end of for */
out:
	return err;
}

/**
 * ufshcd_host_reset - Main reset function registered with scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_host_reset(struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		return SUCCESS;

	return ufshcd_do_reset(hba);
}

/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	spin_lock_irqsave(host->host_lock, flags);

	/* check if command is still pending */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		err = FAILED;
		spin_unlock_irqrestore(host->host_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
	if (err == FAILED)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);

	/* clear the respective UTRLCLR register bit */
	ufshcd_utrl_clear(hba, tag);

	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);
out:
	return err;
}

/**
 * ufshcd_async_scan - asynchronous execution for link startup
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	ufshcd_force_reset_auto_bkops(hba);
	scsi_scan_host(hba->host);
out:
	/* drop the reference taken in ufshcd_init() on all paths */
	pm_runtime_put_sync(hba->dev);
}

static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_destroy		= ufshcd_slave_destroy,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_device_reset,
	.eh_host_reset_handler	= ufshcd_host_reset,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
};

/**
 * ufshcd_suspend - suspend power management function
 * @hba: per adapter instance
 * @state: power state
 *
 * Returns -ENOSYS
 */
int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend);

/**
 * ufshcd_resume - resume power management function
 * @hba: per adapter instance
 *
 * Returns -ENOSYS
 */
int ufshcd_resume(struct ufs_hba *hba)
{
	/*
	 * TODO:
	 * 1. Set HCE to 1, to start the UFS host controller
	 * initialization process
	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
	 * 3. Change the internal driver state to operational
	 * 4. Unblock SCSI requests from SCSI midlayer
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_resume);

int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba)
		return 0;

	/*
	 * The device is idle with no requests in the queue,
	 * allow background operations.
	 */
	return ufshcd_enable_auto_bkops(hba);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba)
		return 0;

	return ufshcd_disable_auto_bkops(hba);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);

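/*
 * Minimal sketch (not part of this file) of how a bus glue driver might
 * route its runtime PM callbacks into the three exports above; the
 * wrapper names and the dev_get_drvdata() handle are assumptions about
 * the glue driver, not something this core dictates:
 *
 *	static int my_ufs_runtime_suspend(struct device *dev)
 *	{
 *		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 *	}
 *	(my_ufs_runtime_resume / my_ufs_runtime_idle wrap the other two
 *	exports the same way.)
 *
 *	static const struct dev_pm_ops my_ufs_pm_ops = {
 *		SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend,
 *				   my_ufs_runtime_resume,
 *				   my_ufs_runtime_idle)
 *	};
 */
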
/**
 * ufshcd_remove - de-allocate the SCSI host and host memory space
 * data structures
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		 void __iomem *mmio_base, unsigned int irq)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err;

	if (!dev) {
		dev_err(dev,
			"Invalid memory reference for dev\n");
		err = -ENODEV;
		goto out_error;
	}

	if (!mmio_base) {
		dev_err(dev,
			"Invalid memory reference for mmio_base\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFSHCD_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->ufshcd_tm_wait_queue);

	/* Initialize work queues */
	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto out_disable;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto out_disable;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	*hba_handle = hba;

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	scsi_host_put(host);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

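/*
 * Minimal sketch of how a bus glue driver is expected to call
 * ufshcd_init(); the resource lookup shown (platform device, ioremap of
 * the register window) is an assumption modelled on a typical platform
 * glue, not code from this file:
 *
 *	static int my_ufs_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_hba *hba;
 *		struct resource *res;
 *		void __iomem *mmio;
 *		int irq, err;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		mmio = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(mmio))
 *			return PTR_ERR(mmio);
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		pm_runtime_enable(&pdev->dev);
 *		err = ufshcd_init(&pdev->dev, &hba, mmio, irq);
 *		if (err)
 *			pm_runtime_disable(&pdev->dev);
 *		else
 *			platform_set_drvdata(pdev, hba);
 *		return err;
 *	}
 */
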
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);