]>
Commit | Line | Data |
---|---|---|
b8b69433 HW |
1 | /** @file\r |
2 | The NvmExpressPei driver is used to manage non-volatile memory subsystem\r | |
3 | which follows NVM Express specification at PEI phase.\r | |
4 | \r | |
5 | Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>\r | |
6 | \r | |
7 | This program and the accompanying materials\r | |
8 | are licensed and made available under the terms and conditions\r | |
9 | of the BSD License which accompanies this distribution. The\r | |
10 | full text of the license may be found at\r | |
11 | http://opensource.org/licenses/bsd-license.php\r | |
12 | \r | |
13 | THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r | |
14 | WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r | |
15 | \r | |
16 | **/\r | |
17 | \r | |
18 | #include "NvmExpressPei.h"\r | |
19 | \r | |
20 | /**\r | |
21 | Create PRP lists for Data transfer which is larger than 2 memory pages.\r | |
22 | \r | |
23 | @param[in] Private The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.\r | |
24 | @param[in] PhysicalAddr The physical base address of Data Buffer.\r | |
25 | @param[in] Pages The number of pages to be transfered.\r | |
26 | \r | |
27 | @retval The pointer Value to the first PRP List of the PRP lists.\r | |
28 | \r | |
29 | **/\r | |
30 | UINT64\r | |
31 | NvmeCreatePrpList (\r | |
32 | IN PEI_NVME_CONTROLLER_PRIVATE_DATA *Private,\r | |
33 | IN EFI_PHYSICAL_ADDRESS PhysicalAddr,\r | |
34 | IN UINTN Pages\r | |
35 | )\r | |
36 | {\r | |
37 | UINTN PrpEntryNo;\r | |
38 | UINTN PrpListNo;\r | |
39 | UINT64 PrpListBase;\r | |
40 | VOID *PrpListHost;\r | |
41 | UINTN PrpListIndex;\r | |
42 | UINTN PrpEntryIndex;\r | |
43 | UINT64 Remainder;\r | |
44 | EFI_PHYSICAL_ADDRESS PrpListPhyAddr;\r | |
45 | UINTN Bytes;\r | |
46 | UINT8 *PrpEntry;\r | |
47 | EFI_PHYSICAL_ADDRESS NewPhyAddr;\r | |
48 | \r | |
49 | //\r | |
50 | // The number of Prp Entry in a memory page.\r | |
51 | //\r | |
52 | PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);\r | |
53 | \r | |
54 | //\r | |
55 | // Calculate total PrpList number.\r | |
56 | //\r | |
57 | PrpListNo = (UINTN) DivU64x64Remainder ((UINT64)Pages, (UINT64)PrpEntryNo, &Remainder);\r | |
58 | if (Remainder != 0) {\r | |
59 | PrpListNo += 1;\r | |
60 | }\r | |
61 | \r | |
62 | if (PrpListNo > NVME_PRP_SIZE) {\r | |
63 | DEBUG ((\r | |
64 | DEBUG_ERROR,\r | |
65 | "%a: The implementation only supports PrpList number up to 4."\r | |
66 | " But %d are needed here.\n",\r | |
67 | __FUNCTION__,\r | |
68 | PrpListNo\r | |
69 | ));\r | |
70 | return 0;\r | |
71 | }\r | |
72 | PrpListHost = (VOID *)(UINTN) NVME_PRP_BASE (Private);\r | |
73 | \r | |
74 | Bytes = EFI_PAGES_TO_SIZE (PrpListNo);\r | |
75 | PrpListPhyAddr = (UINT64)(UINTN)(PrpListHost);\r | |
76 | \r | |
77 | //\r | |
78 | // Fill all PRP lists except of last one.\r | |
79 | //\r | |
80 | ZeroMem (PrpListHost, Bytes);\r | |
81 | for (PrpListIndex = 0; PrpListIndex < PrpListNo - 1; ++PrpListIndex) {\r | |
82 | PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;\r | |
83 | \r | |
84 | for (PrpEntryIndex = 0; PrpEntryIndex < PrpEntryNo; ++PrpEntryIndex) {\r | |
85 | PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));\r | |
86 | if (PrpEntryIndex != PrpEntryNo - 1) {\r | |
87 | //\r | |
88 | // Fill all PRP entries except of last one.\r | |
89 | //\r | |
90 | CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));\r | |
91 | PhysicalAddr += EFI_PAGE_SIZE;\r | |
92 | } else {\r | |
93 | //\r | |
94 | // Fill last PRP entries with next PRP List pointer.\r | |
95 | //\r | |
96 | NewPhyAddr = (PrpListPhyAddr + (PrpListIndex + 1) * EFI_PAGE_SIZE);\r | |
97 | CopyMem (PrpEntry, (VOID *)(UINTN) (&NewPhyAddr), sizeof (UINT64));\r | |
98 | }\r | |
99 | }\r | |
100 | }\r | |
101 | \r | |
102 | //\r | |
103 | // Fill last PRP list.\r | |
104 | //\r | |
105 | PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;\r | |
106 | for (PrpEntryIndex = 0; PrpEntryIndex < ((Remainder != 0) ? Remainder : PrpEntryNo); ++PrpEntryIndex) {\r | |
107 | PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));\r | |
108 | CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));\r | |
109 | \r | |
110 | PhysicalAddr += EFI_PAGE_SIZE;\r | |
111 | }\r | |
112 | \r | |
113 | return PrpListPhyAddr;\r | |
114 | }\r | |
115 | \r | |
116 | /**\r | |
117 | Check the execution status from a given completion queue entry.\r | |
118 | \r | |
119 | @param[in] Cq A pointer to the NVME_CQ item.\r | |
120 | \r | |
121 | **/\r | |
122 | EFI_STATUS\r | |
123 | NvmeCheckCqStatus (\r | |
124 | IN NVME_CQ *Cq\r | |
125 | )\r | |
126 | {\r | |
127 | if (Cq->Sct == 0x0 && Cq->Sc == 0x0) {\r | |
128 | return EFI_SUCCESS;\r | |
129 | }\r | |
130 | \r | |
131 | DEBUG ((DEBUG_INFO, "Dump NVMe Completion Entry Status from [0x%x]:\n", (UINTN)Cq));\r | |
132 | DEBUG ((\r | |
133 | DEBUG_INFO,\r | |
134 | " SQ Identifier : [0x%x], Phase Tag : [%d], Cmd Identifier : [0x%x]\n",\r | |
135 | Cq->Sqid,\r | |
136 | Cq->Pt,\r | |
137 | Cq->Cid\r | |
138 | ));\r | |
139 | DEBUG ((DEBUG_INFO, " Status Code Type : [0x%x], Status Code : [0x%x]\n", Cq->Sct, Cq->Sc));\r | |
140 | DEBUG ((DEBUG_INFO, " NVMe Cmd Execution Result - "));\r | |
141 | \r | |
142 | switch (Cq->Sct) {\r | |
143 | case 0x0:\r | |
144 | switch (Cq->Sc) {\r | |
145 | case 0x0:\r | |
146 | DEBUG ((DEBUG_INFO, "Successful Completion\n"));\r | |
147 | return EFI_SUCCESS;\r | |
148 | case 0x1:\r | |
149 | DEBUG ((DEBUG_INFO, "Invalid Command Opcode\n"));\r | |
150 | break;\r | |
151 | case 0x2:\r | |
152 | DEBUG ((DEBUG_INFO, "Invalid Field in Command\n"));\r | |
153 | break;\r | |
154 | case 0x3:\r | |
155 | DEBUG ((DEBUG_INFO, "Command ID Conflict\n"));\r | |
156 | break;\r | |
157 | case 0x4:\r | |
158 | DEBUG ((DEBUG_INFO, "Data Transfer Error\n"));\r | |
159 | break;\r | |
160 | case 0x5:\r | |
161 | DEBUG ((DEBUG_INFO, "Commands Aborted due to Power Loss Notification\n"));\r | |
162 | break;\r | |
163 | case 0x6:\r | |
164 | DEBUG ((DEBUG_INFO, "Internal Device Error\n"));\r | |
165 | break;\r | |
166 | case 0x7:\r | |
167 | DEBUG ((DEBUG_INFO, "Command Abort Requested\n"));\r | |
168 | break;\r | |
169 | case 0x8:\r | |
170 | DEBUG ((DEBUG_INFO, "Command Aborted due to SQ Deletion\n"));\r | |
171 | break;\r | |
172 | case 0x9:\r | |
173 | DEBUG ((DEBUG_INFO, "Command Aborted due to Failed Fused Command\n"));\r | |
174 | break;\r | |
175 | case 0xA:\r | |
176 | DEBUG ((DEBUG_INFO, "Command Aborted due to Missing Fused Command\n"));\r | |
177 | break;\r | |
178 | case 0xB:\r | |
179 | DEBUG ((DEBUG_INFO, "Invalid Namespace or Format\n"));\r | |
180 | break;\r | |
181 | case 0xC:\r | |
182 | DEBUG ((DEBUG_INFO, "Command Sequence Error\n"));\r | |
183 | break;\r | |
184 | case 0xD:\r | |
185 | DEBUG ((DEBUG_INFO, "Invalid SGL Last Segment Descriptor\n"));\r | |
186 | break;\r | |
187 | case 0xE:\r | |
188 | DEBUG ((DEBUG_INFO, "Invalid Number of SGL Descriptors\n"));\r | |
189 | break;\r | |
190 | case 0xF:\r | |
191 | DEBUG ((DEBUG_INFO, "Data SGL Length Invalid\n"));\r | |
192 | break;\r | |
193 | case 0x10:\r | |
194 | DEBUG ((DEBUG_INFO, "Metadata SGL Length Invalid\n"));\r | |
195 | break;\r | |
196 | case 0x11:\r | |
197 | DEBUG ((DEBUG_INFO, "SGL Descriptor Type Invalid\n"));\r | |
198 | break;\r | |
199 | case 0x80:\r | |
200 | DEBUG ((DEBUG_INFO, "LBA Out of Range\n"));\r | |
201 | break;\r | |
202 | case 0x81:\r | |
203 | DEBUG ((DEBUG_INFO, "Capacity Exceeded\n"));\r | |
204 | break;\r | |
205 | case 0x82:\r | |
206 | DEBUG ((DEBUG_INFO, "Namespace Not Ready\n"));\r | |
207 | break;\r | |
208 | case 0x83:\r | |
209 | DEBUG ((DEBUG_INFO, "Reservation Conflict\n"));\r | |
210 | break;\r | |
211 | }\r | |
212 | break;\r | |
213 | \r | |
214 | case 0x1:\r | |
215 | switch (Cq->Sc) {\r | |
216 | case 0x0:\r | |
217 | DEBUG ((DEBUG_INFO, "Completion Queue Invalid\n"));\r | |
218 | break;\r | |
219 | case 0x1:\r | |
220 | DEBUG ((DEBUG_INFO, "Invalid Queue Identifier\n"));\r | |
221 | break;\r | |
222 | case 0x2:\r | |
223 | DEBUG ((DEBUG_INFO, "Maximum Queue Size Exceeded\n"));\r | |
224 | break;\r | |
225 | case 0x3:\r | |
226 | DEBUG ((DEBUG_INFO, "Abort Command Limit Exceeded\n"));\r | |
227 | break;\r | |
228 | case 0x5:\r | |
229 | DEBUG ((DEBUG_INFO, "Asynchronous Event Request Limit Exceeded\n"));\r | |
230 | break;\r | |
231 | case 0x6:\r | |
232 | DEBUG ((DEBUG_INFO, "Invalid Firmware Slot\n"));\r | |
233 | break;\r | |
234 | case 0x7:\r | |
235 | DEBUG ((DEBUG_INFO, "Invalid Firmware Image\n"));\r | |
236 | break;\r | |
237 | case 0x8:\r | |
238 | DEBUG ((DEBUG_INFO, "Invalid Interrupt Vector\n"));\r | |
239 | break;\r | |
240 | case 0x9:\r | |
241 | DEBUG ((DEBUG_INFO, "Invalid Log Page\n"));\r | |
242 | break;\r | |
243 | case 0xA:\r | |
244 | DEBUG ((DEBUG_INFO, "Invalid Format\n"));\r | |
245 | break;\r | |
246 | case 0xB:\r | |
247 | DEBUG ((DEBUG_INFO, "Firmware Application Requires Conventional Reset\n"));\r | |
248 | break;\r | |
249 | case 0xC:\r | |
250 | DEBUG ((DEBUG_INFO, "Invalid Queue Deletion\n"));\r | |
251 | break;\r | |
252 | case 0xD:\r | |
253 | DEBUG ((DEBUG_INFO, "Feature Identifier Not Saveable\n"));\r | |
254 | break;\r | |
255 | case 0xE:\r | |
256 | DEBUG ((DEBUG_INFO, "Feature Not Changeable\n"));\r | |
257 | break;\r | |
258 | case 0xF:\r | |
259 | DEBUG ((DEBUG_INFO, "Feature Not Namespace Specific\n"));\r | |
260 | break;\r | |
261 | case 0x10:\r | |
262 | DEBUG ((DEBUG_INFO, "Firmware Application Requires NVM Subsystem Reset\n"));\r | |
263 | break;\r | |
264 | case 0x80:\r | |
265 | DEBUG ((DEBUG_INFO, "Conflicting Attributes\n"));\r | |
266 | break;\r | |
267 | case 0x81:\r | |
268 | DEBUG ((DEBUG_INFO, "Invalid Protection Information\n"));\r | |
269 | break;\r | |
270 | case 0x82:\r | |
271 | DEBUG ((DEBUG_INFO, "Attempted Write to Read Only Range\n"));\r | |
272 | break;\r | |
273 | }\r | |
274 | break;\r | |
275 | \r | |
276 | case 0x2:\r | |
277 | switch (Cq->Sc) {\r | |
278 | case 0x80:\r | |
279 | DEBUG ((DEBUG_INFO, "Write Fault\n"));\r | |
280 | break;\r | |
281 | case 0x81:\r | |
282 | DEBUG ((DEBUG_INFO, "Unrecovered Read Error\n"));\r | |
283 | break;\r | |
284 | case 0x82:\r | |
285 | DEBUG ((DEBUG_INFO, "End-to-end Guard Check Error\n"));\r | |
286 | break;\r | |
287 | case 0x83:\r | |
288 | DEBUG ((DEBUG_INFO, "End-to-end Application Tag Check Error\n"));\r | |
289 | break;\r | |
290 | case 0x84:\r | |
291 | DEBUG ((DEBUG_INFO, "End-to-end Reference Tag Check Error\n"));\r | |
292 | break;\r | |
293 | case 0x85:\r | |
294 | DEBUG ((DEBUG_INFO, "Compare Failure\n"));\r | |
295 | break;\r | |
296 | case 0x86:\r | |
297 | DEBUG ((DEBUG_INFO, "Access Denied\n"));\r | |
298 | break;\r | |
299 | }\r | |
300 | break;\r | |
301 | \r | |
302 | default:\r | |
303 | DEBUG ((DEBUG_INFO, "Unknown error\n"));\r | |
304 | break;\r | |
305 | }\r | |
306 | \r | |
307 | return EFI_DEVICE_ERROR;\r | |
308 | }\r | |
309 | \r | |
/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function only
  supports blocking execution of the command.

  @param[in] Private        The pointer to the NVME_CONTEXT Data structure.
  @param[in] NamespaceId    Is a 32 bit Namespace ID to which the Express HCI command packet will
                            be sent.
                            A Value of 0 denotes the NVM Express controller, a Value of all 0FFh in
                            the namespace ID specifies that the command packet should be sent to all
                            valid namespaces.
  @param[in,out] Packet     A pointer to the EDKII PEI NVM Express PassThru Command Packet to send
                            to the NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS              The EDKII PEI NVM Express Command Packet was sent by the host.
                                   TransferLength bytes were transferred to, or from DataBuffer.
  @retval EFI_NOT_READY            The EDKII PEI NVM Express Command Packet could not be sent because
                                   the controller is not ready. The caller may retry again later.
  @retval EFI_DEVICE_ERROR         A device error occurred while attempting to send the EDKII PEI NVM
                                   Express Command Packet.
  @retval EFI_INVALID_PARAMETER    Namespace, or the contents of EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET
                                   are invalid.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_UNSUPPORTED          The command described by the EDKII PEI NVM Express Command Packet
                                   is not supported by the host adapter.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_TIMEOUT              A timeout occurred while waiting for the EDKII PEI NVM Express Command
                                   Packet to execute.

**/
EFI_STATUS
NvmePassThru (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA               *Private,
  IN     UINT32                                         NamespaceId,
  IN OUT EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET *Packet
  )
{
  EFI_STATUS               Status;
  NVME_SQ                  *Sq;
  NVME_CQ                  *Cq;
  UINT8                    QueueId;
  UINTN                    SqSize;
  UINTN                    CqSize;
  EDKII_IOMMU_OPERATION    MapOp;
  UINTN                    MapLength;
  EFI_PHYSICAL_ADDRESS     PhyAddr;
  VOID                     *MapData;
  VOID                     *MapMeta;
  UINT32                   Bytes;
  UINT32                   Offset;
  UINT32                   Data32;
  UINT64                   Timer;

  //
  // Check the data fields in Packet parameter
  //
  if (Packet == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: Packet(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->NvmeCmd,
      (UINTN)Packet->NvmeCompletion
      ));
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: QueueId(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->QueueType
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Locate the next free submission queue slot and the next expected
  // completion queue slot via the driver's shadow doorbell values.
  //
  QueueId = Packet->QueueType;
  Sq = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  if (QueueId == NVME_ADMIN_QUEUE) {
    SqSize = NVME_ASQ_SIZE + 1;
    CqSize = NVME_ACQ_SIZE + 1;
  } else {
    SqSize = NVME_CSQ_SIZE + 1;
    CqSize = NVME_CCQ_SIZE + 1;
  }

  //
  // The caller must set the command's Nsid to the same namespace this request
  // targets; a mismatch is rejected rather than silently overwritten.
  //
  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: Nsid mismatch (%x, %x)\n",
      __FUNCTION__,
      Packet->NvmeCmd->Nsid,
      NamespaceId
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Build the submission queue entry from the caller's command descriptor.
  //
  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Packet->NvmeCmd->Cdw0.Cid;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((DEBUG_ERROR, "%a: Does not support SGL mechanism.\n", __FUNCTION__));
    return EFI_UNSUPPORTED;
  }

  //
  // PRP0 defaults to the caller's buffer address; it is replaced by the
  // IOMMU-mapped device address below when a mapping is performed.
  //
  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  Sq->Prp[1] = 0;
  MapData    = NULL;
  MapMeta    = NULL;
  Status     = EFI_SUCCESS;
  //
  // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller
  // specific addresses.
  // (Opcode BIT0 = host-to-controller transfer, BIT1 = controller-to-host.)
  //
  if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    if ((Packet->TransferLength == 0) || (Packet->TransferBuffer == NULL)) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // Currently, we only support creating IO submission/completion queues that are
    // allocated internally by the driver.
    //
    if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
        ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
      if ((Packet->TransferBuffer != Private->SqBuffer[NVME_IO_QUEUE]) &&
          (Packet->TransferBuffer != Private->CqBuffer[NVME_IO_QUEUE])) {
        DEBUG ((
          DEBUG_ERROR,
          "%a: Does not support external IO queues creation request.\n",
          __FUNCTION__
          ));
        return EFI_UNSUPPORTED;
      }
    } else {
      if ((Sq->Opc & BIT0) != 0) {
        MapOp = EdkiiIoMmuOperationBusMasterRead;
      } else {
        MapOp = EdkiiIoMmuOperationBusMasterWrite;
      }

      MapLength = Packet->TransferLength;
      Status = IoMmuMap (
                 MapOp,
                 Packet->TransferBuffer,
                 &MapLength,
                 &PhyAddr,
                 &MapData
                 );
      //
      // A partial mapping (MapLength shrank) is treated as a failure since the
      // command needs the whole buffer reachable by the controller.
      //
      if (EFI_ERROR (Status) || (MapLength != Packet->TransferLength)) {
        Status = EFI_OUT_OF_RESOURCES;
        DEBUG ((DEBUG_ERROR, "%a: Fail to map data buffer.\n", __FUNCTION__));
        goto Exit;
      }

      Sq->Prp[0] = PhyAddr;

      if((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
        MapLength = Packet->MetadataLength;
        Status = IoMmuMap (
                   MapOp,
                   Packet->MetadataBuffer,
                   &MapLength,
                   &PhyAddr,
                   &MapMeta
                   );
        if (EFI_ERROR (Status) || (MapLength != Packet->MetadataLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map meta data buffer.\n", __FUNCTION__));
          goto Exit;
        }
        Sq->Mptr = PhyAddr;
      }
    }
  }

  //
  // If the Buffer Size spans more than two memory pages (page Size as defined in CC.Mps),
  // then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT32)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining Data Buffer.
    // PhyAddr is rounded to the start of the second page covered by the buffer.
    //
    PhyAddr = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Sq->Prp[1] = NvmeCreatePrpList (
                   Private,
                   PhyAddr,
                   EFI_SIZE_TO_PAGES(Offset + Bytes) - 1
                   );
    if (Sq->Prp[1] == 0) {
      Status = EFI_OUT_OF_RESOURCES;
      DEBUG ((DEBUG_ERROR, "%a: Create PRP list fail, Status - %r\n", __FUNCTION__, Status));
      goto Exit;
    }

  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    //
    // Exactly two pages are covered: PRP1 points directly at the second page.
    //
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  //
  // Copy over only the command dwords the caller marked valid.
  //
  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }
  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }
  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }
  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }
  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }
  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  // The shadow tail is advanced (with wrap) first, then the whole doorbell
  // value is written to the controller register.
  //
  Private->SqTdbl[QueueId].Sqt++;
  if (Private->SqTdbl[QueueId].Sqt == SqSize) {
    Private->SqTdbl[QueueId].Sqt = 0;
  }
  Data32 = ReadUnaligned32 ((UINT32 *)&Private->SqTdbl[QueueId]);
  Status = NVME_SET_SQTDBL (Private, QueueId, &Data32);
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __FUNCTION__, Status));
    goto Exit;
  }

  //
  // Wait for completion queue to get filled in.
  // A phase tag differing from the driver's recorded value means the
  // controller has posted a new completion entry in this slot.
  // NOTE(review): Timer accumulates in MicroSecondDelay units; confirm
  // Packet->CommandTimeout uses the same unit.
  //
  Status = EFI_TIMEOUT;
  Timer  = 0;
  while (Timer < Packet->CommandTimeout) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }

    MicroSecondDelay (NVME_POLL_INTERVAL);
    Timer += NVME_POLL_INTERVAL;
  }

  if (Status == EFI_TIMEOUT) {
    //
    // Timeout occurs for an NVMe command, reset the controller to abort the outstanding command
    //
    DEBUG ((DEBUG_ERROR, "%a: Timeout occurs for the PassThru command.\n", __FUNCTION__));
    Status = NvmeControllerInit (Private);
    if (EFI_ERROR (Status)) {
      Status = EFI_DEVICE_ERROR;
    } else {
      //
      // Return EFI_TIMEOUT to indicate a timeout occurs for PassThru command
      //
      Status = EFI_TIMEOUT;
    }
    goto Exit;
  }

  //
  // Move forward the Completion Queue head
  // (the expected phase tag flips each time the head wraps around).
  //
  Private->CqHdbl[QueueId].Cqh++;
  if (Private->CqHdbl[QueueId].Cqh == CqSize) {
    Private->CqHdbl[QueueId].Cqh = 0;
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Copy the Respose Queue entry for this command to the callers response buffer
  //
  CopyMem (Packet->NvmeCompletion, Cq, sizeof (EDKII_PEI_NVM_EXPRESS_COMPLETION));

  //
  // Check the NVMe cmd execution result
  // (the CQ head doorbell update's return status is not checked here; the
  // command execution result is returned instead).
  //
  Status = NvmeCheckCqStatus (Cq);
  NVME_SET_CQHDBL (Private, QueueId, &Private->CqHdbl[QueueId]);

Exit:
  //
  // Release any IOMMU mappings established above, on both success and error paths.
  //
  if (MapMeta != NULL) {
    IoMmuUnmap (MapMeta);
  }

  if (MapData != NULL) {
    IoMmuUnmap (MapData);
  }

  return Status;
}