]>
Commit | Line | Data |
---|---|---|
7a3e97b0 | 1 | /* |
e0eca63e | 2 | * Universal Flash Storage Host controller driver Core |
7a3e97b0 SY |
3 | * |
4 | * This code is based on drivers/scsi/ufs/ufshcd.c | |
3b1d0580 | 5 | * Copyright (C) 2011-2013 Samsung India Software Operations |
7a3e97b0 | 6 | * |
3b1d0580 VH |
7 | * Authors: |
8 | * Santosh Yaraganavi <santosh.sy@samsung.com> | |
9 | * Vinayak Holikatti <h.vinayak@samsung.com> | |
7a3e97b0 SY |
10 | * |
11 | * This program is free software; you can redistribute it and/or | |
12 | * modify it under the terms of the GNU General Public License | |
13 | * as published by the Free Software Foundation; either version 2 | |
14 | * of the License, or (at your option) any later version. | |
3b1d0580 VH |
15 | * See the COPYING file in the top-level directory or visit |
16 | * <http://www.gnu.org/licenses/gpl-2.0.html> | |
7a3e97b0 SY |
17 | * |
18 | * This program is distributed in the hope that it will be useful, | |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
21 | * GNU General Public License for more details. | |
22 | * | |
3b1d0580 VH |
23 | * This program is provided "AS IS" and "WITH ALL FAULTS" and |
24 | * without warranty of any kind. You are solely responsible for | |
25 | * determining the appropriateness of using and distributing | |
26 | * the program and assume all risks associated with your exercise | |
27 | * of rights with respect to the program, including but not limited | |
28 | * to infringement of third party rights, the risks and costs of | |
29 | * program errors, damage to or loss of data, programs or equipment, | |
30 | * and unavailability or interruption of operations. Under no | |
31 | * circumstances will the contributor of this Program be liable for | |
32 | * any damages of any kind arising from your use or distribution of | |
33 | * this program. | |
7a3e97b0 SY |
34 | */ |
35 | ||
e0eca63e | 36 | #include "ufshcd.h" |
7a3e97b0 | 37 | |
2fbd009b SJ |
/*
 * Interrupt sources enabled during normal operation: transfer-request
 * completion, task-management-request completion and the error mask.
 */
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
41 | ||
7a3e97b0 SY |
/* Limits advertised to the SCSI midlayer via the host template */
enum {
	UFSHCD_MAX_CHANNEL	= 0,	/* single channel */
	UFSHCD_MAX_ID		= 1,	/* one target per host */
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};
49 | ||
/* UFSHCD states */
enum {
	UFSHCD_STATE_OPERATIONAL,	/* accepting SCSI commands */
	UFSHCD_STATE_RESET,		/* reset in progress, requests blocked */
	UFSHCD_STATE_ERROR,		/* unrecoverable error */
};
56 | ||
/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};
63 | ||
/* Interrupt aggregation options */
enum {
	INT_AGGR_RESET,		/* reset aggregation counter and timer */
	INT_AGGR_CONFIG,	/* program threshold and timeout values */
};
69 | ||
2fbd009b SJ |
70 | /** |
71 | * ufshcd_get_intr_mask - Get the interrupt bit mask | |
72 | * @hba - Pointer to adapter instance | |
73 | * | |
74 | * Returns interrupt bit mask per version | |
75 | */ | |
76 | static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) | |
77 | { | |
78 | if (hba->ufs_version == UFSHCI_VERSION_10) | |
79 | return INTERRUPT_MASK_ALL_VER_10; | |
80 | else | |
81 | return INTERRUPT_MASK_ALL_VER_11; | |
82 | } | |
83 | ||
7a3e97b0 SY |
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}
94 | ||
95 | /** | |
96 | * ufshcd_is_device_present - Check if any device connected to | |
97 | * the host controller | |
98 | * @reg_hcs - host controller status register value | |
99 | * | |
73ec513a | 100 | * Returns 1 if device present, 0 if no device detected |
7a3e97b0 SY |
101 | */ |
102 | static inline int ufshcd_is_device_present(u32 reg_hcs) | |
103 | { | |
73ec513a | 104 | return (DEVICE_PRESENT & reg_hcs) ? 1 : 0; |
7a3e97b0 SY |
105 | } |
106 | ||
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
}
118 | ||
119 | /** | |
120 | * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status | |
121 | * @task_req_descp: pointer to utp_task_req_desc structure | |
122 | * | |
123 | * This function is used to get the OCS field from UTMRD | |
124 | * Returns the OCS field in the UTMRD | |
125 | */ | |
126 | static inline int | |
127 | ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) | |
128 | { | |
129 | return task_req_descp->header.dword_2 & MASK_OCS; | |
130 | } | |
131 | ||
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 *
 * Returns maximum number of task management request slots in case of
 * task management queue full or returns the free slot number
 */
static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
{
	/* find_first_zero_bit() returns hba->nutmrs when all bits are set */
	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
}
143 | ||
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 *
 * Writes a mask with only bit @pos zeroed; per the register's
 * write semantics only the zeroed slot is cleared.
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
153 | ||
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 *
	 * Shifting out the Device Present bit and XOR-ing with 0x07
	 * yields 0 only when UTRLRDY, UTMRLRDY and UCRDY are all set
	 * while HEI, DEI and the reserved bits are clear.
	 */
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}
175 | ||
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
188 | ||
189 | /** | |
190 | * ufshcd_free_hba_memory - Free allocated memory for LRB, request | |
191 | * and task lists | |
192 | * @hba: Pointer to adapter instance | |
193 | */ | |
194 | static inline void ufshcd_free_hba_memory(struct ufs_hba *hba) | |
195 | { | |
196 | size_t utmrdl_size, utrdl_size, ucdl_size; | |
197 | ||
198 | kfree(hba->lrb); | |
199 | ||
200 | if (hba->utmrdl_base_addr) { | |
201 | utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; | |
3b1d0580 | 202 | dma_free_coherent(hba->dev, utmrdl_size, |
7a3e97b0 SY |
203 | hba->utmrdl_base_addr, hba->utmrdl_dma_addr); |
204 | } | |
205 | ||
206 | if (hba->utrdl_base_addr) { | |
207 | utrdl_size = | |
208 | (sizeof(struct utp_transfer_req_desc) * hba->nutrs); | |
3b1d0580 | 209 | dma_free_coherent(hba->dev, utrdl_size, |
7a3e97b0 SY |
210 | hba->utrdl_base_addr, hba->utrdl_dma_addr); |
211 | } | |
212 | ||
213 | if (hba->ucdl_base_addr) { | |
214 | ucdl_size = | |
215 | (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); | |
3b1d0580 | 216 | dma_free_coherent(hba->dev, ucdl_size, |
7a3e97b0 SY |
217 | hba->ucdl_base_addr, hba->ucdl_dma_addr); |
218 | } | |
219 | } | |
220 | ||
221 | /** | |
222 | * ufshcd_is_valid_req_rsp - checks if controller TR response is valid | |
223 | * @ucd_rsp_ptr: pointer to response UPIU | |
224 | * | |
225 | * This function checks the response UPIU for valid transaction type in | |
226 | * response field | |
227 | * Returns 0 on success, non-zero on failure | |
228 | */ | |
229 | static inline int | |
230 | ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) | |
231 | { | |
232 | return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) == | |
233 | UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16; | |
234 | } | |
235 | ||
/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
248 | ||
/**
 * ufshcd_config_int_aggr - Configure interrupt aggregation values.
 *		Currently there is no use case where we want to configure
 *		interrupt aggregation dynamically. So to configure interrupt
 *		aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
 *		INT_AGGR_TIMEOUT_VALUE are used.
 * @hba: per adapter instance
 * @option: Interrupt aggregation option (INT_AGGR_RESET or INT_AGGR_CONFIG)
 */
static inline void
ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
{
	switch (option) {
	case INT_AGGR_RESET:
		/* Reset the aggregation counter and timer without
		 * reprogramming threshold/timeout values. */
		ufshcd_writel(hba, INT_AGGR_ENABLE |
			      INT_AGGR_COUNTER_AND_TIMER_RESET,
			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
		break;
	case INT_AGGR_CONFIG:
		/* Program the compile-time threshold and timeout values */
		ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
			      INT_AGGR_COUNTER_THRESHOLD_VALUE |
			      INT_AGGR_TIMEOUT_VALUE,
			      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
		break;
	}
}
275 | ||
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
289 | ||
7a3e97b0 SY |
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 *
 * Setting HCE kicks off the controller's internal init sequence.
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
298 | ||
299 | /** | |
300 | * ufshcd_is_hba_active - Get controller state | |
301 | * @hba: per adapter instance | |
302 | * | |
303 | * Returns zero if controller is active, 1 otherwise | |
304 | */ | |
305 | static inline int ufshcd_is_hba_active(struct ufs_hba *hba) | |
306 | { | |
b873a275 | 307 | return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; |
7a3e97b0 SY |
308 | } |
309 | ||
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 *
 * The outstanding-request bit is set before ringing the doorbell so the
 * completion path sees the request as pending.
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
321 | ||
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp - pointer to local reference block
 *
 * Copies the sense data from the response UPIU into the midlayer's
 * sense buffer, truncating to SCSI_SENSE_BUFFERSIZE.
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer) {
		/* sense_data_len is big-endian in the response UPIU */
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}
336 | ||
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Caches the capabilities register and derives the number of transfer
 * and task-management request slots from it.
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
350 | ||
/**
 * ufshcd_send_uic_command - Send UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmnd: UIC command
 *
 * The argument registers must be programmed before the command
 * register; writing the command register issues the command.
 */
static inline void
ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
{
	/* Write Args */
	ufshcd_writel(hba, uic_cmnd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmnd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmnd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmnd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
368 | ||
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * DMA-maps the SCSI command's scatter-gather list and fills in the
 * PRD table of the corresponding command descriptor.
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			/* PRD size field holds (byte count - 1) */
			prd_table[i].size  =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		/* No data phase: empty PRD table */
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
408 | ||
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 *
 * Read-modify-write of the interrupt enable register.  On UFSHCI v1.0
 * the bits in INTERRUPT_MASK_RW_VER_10 need special handling: they are
 * preserved as-is and only the requested bits that are currently
 * clear are turned on.
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
428 | ||
/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 *
 * Read-modify-write of the interrupt enable register.  On UFSHCI v1.0
 * the INTERRUPT_MASK_RW_VER_10 bits are masked out of @intrs and
 * handled separately from the remaining bits.
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
450 | ||
/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @lrbp - pointer to local reference block
 *
 * Fills in the UTP transfer request descriptor header (little-endian)
 * and the command UPIU (big-endian) for the request described by
 * @lrbp.  Only UTP_CMD_TYPE_SCSI is implemented; the device-management
 * and UFS-native cases are placeholders.
 */
static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_transfer_req_desc *req_desc;
	struct utp_upiu_cmd *ucd_cmd_ptr;
	u32 data_direction;
	u32 upiu_flags;

	ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
	req_desc = lrbp->utr_descriptor_ptr;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		/* Derive the DMA direction and UPIU flags from the command */
		if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
			data_direction = UTP_DEVICE_TO_HOST;
			upiu_flags = UPIU_CMD_FLAGS_READ;
		} else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
			data_direction = UTP_HOST_TO_DEVICE;
			upiu_flags = UPIU_CMD_FLAGS_WRITE;
		} else {
			data_direction = UTP_NO_DATA_TRANSFER;
			upiu_flags = UPIU_CMD_FLAGS_NONE;
		}

		/* Transfer request descriptor header fields */
		req_desc->header.dword_0 =
			cpu_to_le32(data_direction | UTP_SCSI_COMMAND);

		/*
		 * assigning invalid value for command status. Controller
		 * updates OCS on command completion, with the command
		 * status
		 */
		req_desc->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

		/* command descriptor fields */
		ucd_cmd_ptr->header.dword_0 =
			cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
						      upiu_flags,
						      lrbp->lun,
						      lrbp->task_tag));
		ucd_cmd_ptr->header.dword_1 =
			cpu_to_be32(
				UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
						  0,
						  0,
						  0));

		/* Total EHS length and Data segment length will be zero */
		ucd_cmd_ptr->header.dword_2 = 0;

		ucd_cmd_ptr->exp_data_transfer_len =
			cpu_to_be32(lrbp->cmd->sdb.length);

		/* CDB copied verbatim, truncated to MAX_CDB_SIZE bytes */
		memcpy(ucd_cmd_ptr->cdb,
		       lrbp->cmd->cmnd,
		       (min_t(unsigned short,
			      lrbp->cmd->cmd_len,
			      MAX_CDB_SIZE)));
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		/* For query function implementation */
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		break;
	} /* end of switch */
}
523 | ||
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host the command was queued to
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 * (SCSI_MLQUEUE_HOST_BUSY while the controller is not operational,
 * or the scsi_dma_map() error from ufshcd_map_sg()).
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	/* The block-layer tag doubles as the UTRD slot index */
	tag = cmd->request->tag;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;

	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err)
		goto out;

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}
571 | ||
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * On any failure, everything allocated so far is released via
 * ufshcd_free_hba_memory().
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
						 ucdl_size,
						 &hba->ucdl_dma_addr,
						 GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
						  utrdl_size,
						  &hba->utrdl_dma_addr,
						  GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
						   utmrdl_size,
						   &hba->utmrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	ufshcd_free_hba_memory(hba);
	return -ENOMEM;
}
652 | ||
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 * address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 * and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 * into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);

		/* Cache kernel-virtual pointers for fast per-tag access */
		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_cmd_ptr =
			(struct utp_upiu_cmd *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}
714 | ||
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command *uic_cmd;
	unsigned long flags;

	/* check if controller is ready to accept UIC commands */
	if ((ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
	    UIC_COMMAND_READY) == 0x0) {
		dev_err(hba->dev,
			"Controller not ready"
			" to accept UIC commands\n");
		return -EIO;
	}

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* form UIC command */
	uic_cmd = &hba->active_uic_cmd;
	uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
	uic_cmd->argument1 = 0;
	uic_cmd->argument2 = 0;
	uic_cmd->argument3 = 0;

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);

	/* sending UIC commands to controller */
	ufshcd_send_uic_command(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
757 | ||
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Configure run-stop-registers
 * 3. Enable required interrupts
 * 4. Configure interrupt aggregation
 *
 * Returns 0 on success, non-zero value on failure
 * (-ENXIO if no device is detected, -EIO if the controller's
 * list-ready bits are not all set).
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* check if device present */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!ufshcd_is_device_present(reg)) {
		dev_err(hba->dev, "cc: Device not present\n");
		err = -ENXIO;
		goto out;
	}

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);

	/* Requests blocked during a reset can flow again */
	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		scsi_unblock_requests(hba->host);

	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	scsi_scan_host(hba->host);
out:
	return err;
}
810 | ||
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, -EIO if the controller does not report
 * ready within the retry budget.
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * msleep of 1 and 5 used in this function might result in msleep(20),
	 * but it was necessary to send the UFS FPGA to reset mode during
	 * development and testing of this driver. msleep can be changed to
	 * mdelay and retry count can be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {

		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}
	return 0;
}
873 | ||
/**
 * ufshcd_initialize_hba - start the initialization process
 * @hba: per adapter instance
 *
 * 1. Enable the controller via ufshcd_hba_enable.
 * 2. Program the Transfer Request List Address with the starting address of
 * UTRDL.
 * 3. Program the Task Management Request List Address with starting address
 * of UTMRDL.
 *
 * Returns 0 on success, non-zero value on failure.
 */
static int ufshcd_initialize_hba(struct ufs_hba *hba)
{
	if (ufshcd_hba_enable(hba))
		return -EIO;

	/*
	 * Configure UTRL and UTMRL base address registers. Each list base
	 * is a 64-bit DMA address programmed as two 32-bit halves.
	 */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/* Initialize unipro link startup procedure */
	return ufshcd_dme_link_startup(hba);
}
904 | ||
/**
 * ufshcd_do_reset - reset the host controller
 * @hba: per adapter instance
 *
 * Stops the controller, completes all outstanding SCSI commands with
 * DID_RESET, clears the request/task bitmaps and re-runs the full
 * controller initialization sequence.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_do_reset(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	unsigned long flags;
	int tag;

	/* block commands from midlayer */
	scsi_block_requests(hba->host);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;

	/* send controller to reset state */
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* abort outstanding commands */
	for (tag = 0; tag < hba->nutrs; tag++) {
		if (test_bit(tag, &hba->outstanding_reqs)) {
			lrbp = &hba->lrb[tag];
			/*
			 * NOTE(review): assumes lrbp->cmd is non-NULL for
			 * every bit set in outstanding_reqs — verify that
			 * internal (non-SCSI) requests never set these bits.
			 */
			scsi_dma_unmap(lrbp->cmd);
			lrbp->cmd->result = DID_RESET << 16;
			lrbp->cmd->scsi_done(lrbp->cmd);
			lrbp->cmd = NULL;
		}
	}

	/* clear outstanding request/task bit maps */
	hba->outstanding_reqs = 0;
	hba->outstanding_tasks = 0;

	/* start the initialization process */
	if (ufshcd_initialize_hba(hba)) {
		dev_err(hba->dev,
			"Reset: Controller initialization failed\n");
		return FAILED;
	}
	return SUCCESS;
}
950 | ||
951 | /** | |
952 | * ufshcd_slave_alloc - handle initial SCSI device configurations | |
953 | * @sdev: pointer to SCSI device | |
954 | * | |
955 | * Returns success | |
956 | */ | |
957 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | |
958 | { | |
959 | struct ufs_hba *hba; | |
960 | ||
961 | hba = shost_priv(sdev->host); | |
962 | sdev->tagged_supported = 1; | |
963 | ||
964 | /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ | |
965 | sdev->use_10_for_ms = 1; | |
966 | scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); | |
967 | ||
968 | /* | |
969 | * Inform SCSI Midlayer that the LUN queue depth is same as the | |
970 | * controller queue depth. If a LUN queue depth is less than the | |
971 | * controller queue depth and if the LUN reports | |
972 | * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted | |
973 | * with scsi_adjust_queue_depth. | |
974 | */ | |
975 | scsi_activate_tcq(sdev, hba->nutrs); | |
976 | return 0; | |
977 | } | |
978 | ||
979 | /** | |
980 | * ufshcd_slave_destroy - remove SCSI device configurations | |
981 | * @sdev: pointer to SCSI device | |
982 | */ | |
983 | static void ufshcd_slave_destroy(struct scsi_device *sdev) | |
984 | { | |
985 | struct ufs_hba *hba; | |
986 | ||
987 | hba = shost_priv(sdev->host); | |
988 | scsi_deactivate_tcq(sdev, hba->nutrs); | |
989 | } | |
990 | ||
/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 *
 * Reads the overall command status (OCS) of the task management request
 * descriptor at @index and, on OCS_SUCCESS, extracts the task management
 * service response from the response UPIU header.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
					task_req_descp[index].task_rsp_upiu;
		/* the service response lives in bits 15:8 of header dword_1 */
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);

		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
			task_result = FAILED;
		else
			task_result = SUCCESS;
	} else {
		task_result = FAILED;
		dev_err(hba->dev,
			"trc: Invalid ocs = %x\n", ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return task_result;
}
1033 | ||
1034 | /** | |
1035 | * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with | |
1036 | * SAM_STAT_TASK_SET_FULL SCSI command status. | |
1037 | * @cmd: pointer to SCSI command | |
1038 | */ | |
1039 | static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd) | |
1040 | { | |
1041 | struct ufs_hba *hba; | |
1042 | int i; | |
1043 | int lun_qdepth = 0; | |
1044 | ||
1045 | hba = shost_priv(cmd->device->host); | |
1046 | ||
1047 | /* | |
1048 | * LUN queue depth can be obtained by counting outstanding commands | |
1049 | * on the LUN. | |
1050 | */ | |
1051 | for (i = 0; i < hba->nutrs; i++) { | |
1052 | if (test_bit(i, &hba->outstanding_reqs)) { | |
1053 | ||
1054 | /* | |
1055 | * Check if the outstanding command belongs | |
1056 | * to the LUN which reported SAM_STAT_TASK_SET_FULL. | |
1057 | */ | |
1058 | if (cmd->device->lun == hba->lrb[i].lun) | |
1059 | lun_qdepth++; | |
1060 | } | |
1061 | } | |
1062 | ||
1063 | /* | |
1064 | * LUN queue depth will be total outstanding commands, except the | |
1065 | * command for which the LUN reported SAM_STAT_TASK_SET_FULL. | |
1066 | */ | |
1067 | scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1); | |
1068 | } | |
1069 | ||
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value base on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  SAM_STAT_GOOD;
		break;
	case SAM_STAT_CHECK_CONDITION:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  SAM_STAT_CHECK_CONDITION;
		/* a check condition carries sense data; copy it out now */
		ufshcd_copy_sense_data(lrbp);
		break;
	case SAM_STAT_BUSY:
		result |= SAM_STAT_BUSY;
		break;
	case SAM_STAT_TASK_SET_FULL:
		/*
		 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
		 * depth needs to be adjusted to the exact number of
		 * outstanding commands the LUN can handle at any given time.
		 */
		ufshcd_adjust_lun_qdepth(lrbp->cmd);
		result |= SAM_STAT_TASK_SET_FULL;
		break;
	case SAM_STAT_TASK_ABORTED:
		result |= SAM_STAT_TASK_ABORTED;
		break;
	default:
		/* unrecognized status is reported as a host-side error */
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}
1117 | ||
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		/* check if the returned transfer response is valid */
		result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
		if (result) {
			dev_err(hba->dev,
				"Invalid response = %x\n", result);
			break;
		}

		/*
		 * get the response UPIU result to extract
		 * the SCSI command status
		 */
		result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

		/*
		 * get the result based on SCSI status response
		 * to notify the SCSI midlayer of the command status
		 */
		scsi_status = result & MASK_SCSI_STATUS;
		result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		/* any other OCS value is treated as a controller failure */
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}
1177 | ||
1178 | /** | |
1179 | * ufshcd_transfer_req_compl - handle SCSI and query command completion | |
1180 | * @hba: per adapter instance | |
1181 | */ | |
1182 | static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |
1183 | { | |
1184 | struct ufshcd_lrb *lrb; | |
1185 | unsigned long completed_reqs; | |
1186 | u32 tr_doorbell; | |
1187 | int result; | |
1188 | int index; | |
1189 | ||
1190 | lrb = hba->lrb; | |
b873a275 | 1191 | tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
7a3e97b0 SY |
1192 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; |
1193 | ||
1194 | for (index = 0; index < hba->nutrs; index++) { | |
1195 | if (test_bit(index, &completed_reqs)) { | |
1196 | ||
1197 | result = ufshcd_transfer_rsp_status(hba, &lrb[index]); | |
1198 | ||
1199 | if (lrb[index].cmd) { | |
1200 | scsi_dma_unmap(lrb[index].cmd); | |
1201 | lrb[index].cmd->result = result; | |
1202 | lrb[index].cmd->scsi_done(lrb[index].cmd); | |
1203 | ||
1204 | /* Mark completed command as NULL in LRB */ | |
1205 | lrb[index].cmd = NULL; | |
1206 | } | |
1207 | } /* end of if */ | |
1208 | } /* end of for */ | |
1209 | ||
1210 | /* clear corresponding bits of completed commands */ | |
1211 | hba->outstanding_reqs ^= completed_reqs; | |
1212 | ||
1213 | /* Reset interrupt aggregation counters */ | |
1214 | ufshcd_config_int_aggr(hba, INT_AGGR_RESET); | |
1215 | } | |
1216 | ||
/**
 * ufshcd_uic_cc_handler - handle UIC command completion
 * @work: pointer to a work queue structure
 *
 * On successful completion of a DME link startup command, moves the
 * host controller to the operational state.
 */
static void ufshcd_uic_cc_handler (struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(work, struct ufs_hba, uic_workq);

	/* a zero UIC command result indicates success */
	if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
	    !(ufshcd_get_uic_cmd_result(hba))) {

		if (ufshcd_make_hba_operational(hba))
			dev_err(hba->dev,
				"cc: hba not operational state\n");
		return;
	}
}
1238 | ||
/**
 * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to a work queue structure
 *
 * Work item scheduled by ufshcd_err_handler; performs a full host
 * controller reset unless one is already in progress.
 */
static void ufshcd_fatal_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	hba = container_of(work, struct ufs_hba, feh_workq);

	/* check if reset is already in progress */
	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
		ufshcd_do_reset(hba);
}
1252 | ||
/**
 * ufshcd_err_handler - Check for fatal errors
 * @hba: per adapter instance
 *
 * Marks the host controller state as error and schedules the fatal error
 * handler work when a fatal controller interrupt or a PA_INIT UIC data
 * link layer error is detected.
 */
static void ufshcd_err_handler(struct ufs_hba *hba)
{
	u32 reg;

	if (hba->errors & INT_FATAL_ERRORS)
		goto fatal_eh;

	if (hba->errors & UIC_ERROR) {
		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			goto fatal_eh;
	}
	return;
fatal_eh:
	hba->ufshcd_state = UFSHCD_STATE_ERROR;
	schedule_work(&hba->feh_workq);
}
1274 | ||
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Records which task management slots completed (doorbell bit cleared
 * while still marked outstanding) in tm_condition and wakes any waiter
 * blocked in ufshcd_issue_tm_cmd.
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
}
1287 | ||
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Dispatches each asserted interrupt source to its dedicated handler.
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_err_handler(hba);

	/* UIC command completion handling is deferred to process context */
	if (intr_status & UIC_COMMAND_COMPL)
		schedule_work(&hba->uic_workq);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}
1308 | ||
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	/* the host lock serializes against the request submission path */
	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		ufshcd_sl_intr(hba, intr_status);

		/* If UFSHCI 1.0 then clear interrupt status register */
		if (hba->ufs_version == UFSHCI_VERSION_10)
			ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}
1337 | ||
1338 | /** | |
1339 | * ufshcd_issue_tm_cmd - issues task management commands to controller | |
1340 | * @hba: per adapter instance | |
1341 | * @lrbp: pointer to local reference block | |
1342 | * | |
1343 | * Returns SUCCESS/FAILED | |
1344 | */ | |
1345 | static int | |
1346 | ufshcd_issue_tm_cmd(struct ufs_hba *hba, | |
1347 | struct ufshcd_lrb *lrbp, | |
1348 | u8 tm_function) | |
1349 | { | |
1350 | struct utp_task_req_desc *task_req_descp; | |
1351 | struct utp_upiu_task_req *task_req_upiup; | |
1352 | struct Scsi_Host *host; | |
1353 | unsigned long flags; | |
1354 | int free_slot = 0; | |
1355 | int err; | |
1356 | ||
1357 | host = hba->host; | |
1358 | ||
1359 | spin_lock_irqsave(host->host_lock, flags); | |
1360 | ||
1361 | /* If task management queue is full */ | |
1362 | free_slot = ufshcd_get_tm_free_slot(hba); | |
1363 | if (free_slot >= hba->nutmrs) { | |
1364 | spin_unlock_irqrestore(host->host_lock, flags); | |
3b1d0580 | 1365 | dev_err(hba->dev, "Task management queue full\n"); |
7a3e97b0 SY |
1366 | err = FAILED; |
1367 | goto out; | |
1368 | } | |
1369 | ||
1370 | task_req_descp = hba->utmrdl_base_addr; | |
1371 | task_req_descp += free_slot; | |
1372 | ||
1373 | /* Configure task request descriptor */ | |
1374 | task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD); | |
1375 | task_req_descp->header.dword_2 = | |
1376 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
1377 | ||
1378 | /* Configure task request UPIU */ | |
1379 | task_req_upiup = | |
1380 | (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; | |
1381 | task_req_upiup->header.dword_0 = | |
1382 | cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, | |
1383 | lrbp->lun, lrbp->task_tag)); | |
1384 | task_req_upiup->header.dword_1 = | |
1385 | cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0)); | |
1386 | ||
1387 | task_req_upiup->input_param1 = lrbp->lun; | |
1388 | task_req_upiup->input_param1 = | |
1389 | cpu_to_be32(task_req_upiup->input_param1); | |
1390 | task_req_upiup->input_param2 = lrbp->task_tag; | |
1391 | task_req_upiup->input_param2 = | |
1392 | cpu_to_be32(task_req_upiup->input_param2); | |
1393 | ||
1394 | /* send command to the controller */ | |
1395 | __set_bit(free_slot, &hba->outstanding_tasks); | |
b873a275 | 1396 | ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); |
7a3e97b0 SY |
1397 | |
1398 | spin_unlock_irqrestore(host->host_lock, flags); | |
1399 | ||
1400 | /* wait until the task management command is completed */ | |
1401 | err = | |
1402 | wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, | |
1403 | (test_bit(free_slot, | |
1404 | &hba->tm_condition) != 0), | |
1405 | 60 * HZ); | |
1406 | if (!err) { | |
3b1d0580 | 1407 | dev_err(hba->dev, |
7a3e97b0 SY |
1408 | "Task management command timed-out\n"); |
1409 | err = FAILED; | |
1410 | goto out; | |
1411 | } | |
1412 | clear_bit(free_slot, &hba->tm_condition); | |
94c122ab | 1413 | err = ufshcd_task_req_compl(hba, free_slot); |
7a3e97b0 SY |
1414 | out: |
1415 | return err; | |
1416 | } | |
1417 | ||
1418 | /** | |
1419 | * ufshcd_device_reset - reset device and abort all the pending commands | |
1420 | * @cmd: SCSI command pointer | |
1421 | * | |
1422 | * Returns SUCCESS/FAILED | |
1423 | */ | |
1424 | static int ufshcd_device_reset(struct scsi_cmnd *cmd) | |
1425 | { | |
1426 | struct Scsi_Host *host; | |
1427 | struct ufs_hba *hba; | |
1428 | unsigned int tag; | |
1429 | u32 pos; | |
1430 | int err; | |
1431 | ||
1432 | host = cmd->device->host; | |
1433 | hba = shost_priv(host); | |
1434 | tag = cmd->request->tag; | |
1435 | ||
1436 | err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); | |
94c122ab | 1437 | if (err == FAILED) |
7a3e97b0 SY |
1438 | goto out; |
1439 | ||
1440 | for (pos = 0; pos < hba->nutrs; pos++) { | |
1441 | if (test_bit(pos, &hba->outstanding_reqs) && | |
1442 | (hba->lrb[tag].lun == hba->lrb[pos].lun)) { | |
1443 | ||
1444 | /* clear the respective UTRLCLR register bit */ | |
1445 | ufshcd_utrl_clear(hba, pos); | |
1446 | ||
1447 | clear_bit(pos, &hba->outstanding_reqs); | |
1448 | ||
1449 | if (hba->lrb[pos].cmd) { | |
1450 | scsi_dma_unmap(hba->lrb[pos].cmd); | |
1451 | hba->lrb[pos].cmd->result = | |
1452 | DID_ABORT << 16; | |
1453 | hba->lrb[pos].cmd->scsi_done(cmd); | |
1454 | hba->lrb[pos].cmd = NULL; | |
1455 | } | |
1456 | } | |
1457 | } /* end of for */ | |
1458 | out: | |
1459 | return err; | |
1460 | } | |
1461 | ||
1462 | /** | |
1463 | * ufshcd_host_reset - Main reset function registered with scsi layer | |
1464 | * @cmd: SCSI command pointer | |
1465 | * | |
1466 | * Returns SUCCESS/FAILED | |
1467 | */ | |
1468 | static int ufshcd_host_reset(struct scsi_cmnd *cmd) | |
1469 | { | |
1470 | struct ufs_hba *hba; | |
1471 | ||
1472 | hba = shost_priv(cmd->device->host); | |
1473 | ||
1474 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) | |
1475 | return SUCCESS; | |
1476 | ||
94c122ab | 1477 | return ufshcd_do_reset(hba); |
7a3e97b0 SY |
1478 | } |
1479 | ||
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Sends an ABORT TASK task management function for @cmd and, on success,
 * clears the transfer request slot so the tag can be reused.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	spin_lock_irqsave(host->host_lock, flags);

	/* check if command is still pending */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		err = FAILED;
		spin_unlock_irqrestore(host->host_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	/* the lock is dropped here: ufshcd_issue_tm_cmd sleeps */
	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
	if (err == FAILED)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);

	/* clear the respective UTRLCLR register bit */
	ufshcd_utrl_clear(hba, tag);

	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	return err;
}
1525 | ||
/* SCSI host template wiring the UFS entry points into the SCSI midlayer */
static struct scsi_host_template ufshcd_driver_template = {
	.module = THIS_MODULE,
	.name = UFSHCD,
	.proc_name = UFSHCD,
	.queuecommand = ufshcd_queuecommand,
	.slave_alloc = ufshcd_slave_alloc,
	.slave_destroy = ufshcd_slave_destroy,
	.eh_abort_handler = ufshcd_abort,
	.eh_device_reset_handler = ufshcd_device_reset,
	.eh_host_reset_handler = ufshcd_host_reset,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.cmd_per_lun = UFSHCD_CMD_PER_LUN,
	.can_queue = UFSHCD_CAN_QUEUE,
};
1541 | ||
7a3e97b0 SY |
/**
 * ufshcd_suspend - suspend power management function
 * @hba: per adapter instance
 * @state: power state
 *
 * Power management is not implemented yet; this stub always fails.
 *
 * Returns -ENOSYS
 */
int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend);
7a3e97b0 SY |
1563 | |
/**
 * ufshcd_resume - resume power management function
 * @hba: per adapter instance
 *
 * Power management is not implemented yet; this stub always fails.
 *
 * Returns -ENOSYS
 */
int ufshcd_resume(struct ufs_hba *hba)
{
	/*
	 * TODO:
	 * 1. Set HCE to 1, to start the UFS host controller
	 * initialization process
	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
	 * 3. Change the internal driver state to operational
	 * 4. Unblock SCSI requests from SCSI midlayer
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_resume);
1584 | ||
7a3e97b0 SY |
/**
 * ufshcd_hba_free - free allocated memory for
 * host memory space data structures
 * @hba: per adapter instance
 */
static void ufshcd_hba_free(struct ufs_hba *hba)
{
	/*
	 * NOTE(review): mmio_base was mapped by the bus glue that called
	 * ufshcd_init(), yet it is unmapped here — confirm callers expect
	 * the core to own the mapping at teardown.
	 */
	iounmap(hba->mmio_base);
	ufshcd_free_hba_memory(hba);
}
1595 | ||
1596 | /** | |
3b1d0580 | 1597 | * ufshcd_remove - de-allocate SCSI host and host memory space |
7a3e97b0 | 1598 | * data structure memory |
3b1d0580 | 1599 | * @hba - per adapter instance |
7a3e97b0 | 1600 | */ |
3b1d0580 | 1601 | void ufshcd_remove(struct ufs_hba *hba) |
7a3e97b0 | 1602 | { |
7a3e97b0 | 1603 | /* disable interrupts */ |
2fbd009b | 1604 | ufshcd_disable_intr(hba, hba->intr_mask); |
7a3e97b0 SY |
1605 | |
1606 | ufshcd_hba_stop(hba); | |
1607 | ufshcd_hba_free(hba); | |
1608 | ||
1609 | scsi_remove_host(hba->host); | |
1610 | scsi_host_put(hba->host); | |
3b1d0580 VH |
1611 | } |
1612 | EXPORT_SYMBOL_GPL(ufshcd_remove); | |
1613 | ||
/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Allocates a SCSI host, sets up descriptor memory, registers the IRQ
 * handler, attaches to the SCSI midlayer and starts controller
 * initialization. On failure everything acquired so far is released via
 * the out_* labels in reverse order of acquisition.
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		 void __iomem *mmio_base, unsigned int irq)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err;

	if (!dev) {
		/*
		 * NOTE(review): dev is NULL on this path, so dev_err() must
		 * tolerate a NULL device pointer for this message to be
		 * printed safely — confirm against the dev_printk impl.
		 */
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	if (!mmio_base) {
		dev_err(dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	/* the mapping was created by the bus glue; stored for teardown */
	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFSHCD_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->ufshcd_tm_wait_queue);

	/* Initialize work queues */
	INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);

	/* IRQ registration */
	err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_lrb_free;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto out_free_irq;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto out_free_irq;
	}

	/* Initialization routine */
	err = ufshcd_initialize_hba(hba);
	if (err) {
		dev_err(hba->dev, "Initialization failed\n");
		goto out_remove_scsi_host;
	}
	*hba_handle = hba;

	return 0;

	/* error unwinding: release resources in reverse acquisition order */
out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_free_irq:
	free_irq(irq, hba);
out_lrb_free:
	ufshcd_free_hba_memory(hba);
out_disable:
	scsi_host_put(host);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
1732 | ||
3b1d0580 VH |
1733 | MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); |
1734 | MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); | |
e0eca63e | 1735 | MODULE_DESCRIPTION("Generic UFS host controller driver Core"); |
7a3e97b0 SY |
1736 | MODULE_LICENSE("GPL"); |
1737 | MODULE_VERSION(UFSHCD_DRIVER_VERSION); |