]>
Commit | Line | Data |
---|---|---|
b8b69433 HW |
1 | /** @file\r |
2 | The NvmExpressPei driver is used to manage non-volatile memory subsystem\r | |
3 | which follows NVM Express specification at PEI phase.\r | |
4 | \r | |
5 | Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>\r | |
6 | \r | |
9d510e61 | 7 | SPDX-License-Identifier: BSD-2-Clause-Patent\r |
b8b69433 HW |
8 | \r |
9 | **/\r | |
10 | \r | |
11 | #include "NvmExpressPei.h"\r | |
12 | \r | |
13 | /**\r | |
14 | Create PRP lists for Data transfer which is larger than 2 memory pages.\r | |
15 | \r | |
16 | @param[in] Private The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.\r | |
17 | @param[in] PhysicalAddr The physical base address of Data Buffer.\r | |
18 | @param[in] Pages The number of pages to be transfered.\r | |
19 | \r | |
20 | @retval The pointer Value to the first PRP List of the PRP lists.\r | |
21 | \r | |
22 | **/\r | |
23 | UINT64\r | |
24 | NvmeCreatePrpList (\r | |
25 | IN PEI_NVME_CONTROLLER_PRIVATE_DATA *Private,\r | |
26 | IN EFI_PHYSICAL_ADDRESS PhysicalAddr,\r | |
27 | IN UINTN Pages\r | |
28 | )\r | |
29 | {\r | |
30 | UINTN PrpEntryNo;\r | |
31 | UINTN PrpListNo;\r | |
32 | UINT64 PrpListBase;\r | |
33 | VOID *PrpListHost;\r | |
34 | UINTN PrpListIndex;\r | |
35 | UINTN PrpEntryIndex;\r | |
36 | UINT64 Remainder;\r | |
37 | EFI_PHYSICAL_ADDRESS PrpListPhyAddr;\r | |
38 | UINTN Bytes;\r | |
39 | UINT8 *PrpEntry;\r | |
40 | EFI_PHYSICAL_ADDRESS NewPhyAddr;\r | |
41 | \r | |
42 | //\r | |
43 | // The number of Prp Entry in a memory page.\r | |
44 | //\r | |
45 | PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);\r | |
46 | \r | |
47 | //\r | |
48 | // Calculate total PrpList number.\r | |
49 | //\r | |
50 | PrpListNo = (UINTN) DivU64x64Remainder ((UINT64)Pages, (UINT64)PrpEntryNo, &Remainder);\r | |
51 | if (Remainder != 0) {\r | |
52 | PrpListNo += 1;\r | |
53 | }\r | |
54 | \r | |
55 | if (PrpListNo > NVME_PRP_SIZE) {\r | |
56 | DEBUG ((\r | |
57 | DEBUG_ERROR,\r | |
58 | "%a: The implementation only supports PrpList number up to 4."\r | |
59 | " But %d are needed here.\n",\r | |
60 | __FUNCTION__,\r | |
61 | PrpListNo\r | |
62 | ));\r | |
63 | return 0;\r | |
64 | }\r | |
65 | PrpListHost = (VOID *)(UINTN) NVME_PRP_BASE (Private);\r | |
66 | \r | |
67 | Bytes = EFI_PAGES_TO_SIZE (PrpListNo);\r | |
68 | PrpListPhyAddr = (UINT64)(UINTN)(PrpListHost);\r | |
69 | \r | |
70 | //\r | |
71 | // Fill all PRP lists except of last one.\r | |
72 | //\r | |
73 | ZeroMem (PrpListHost, Bytes);\r | |
74 | for (PrpListIndex = 0; PrpListIndex < PrpListNo - 1; ++PrpListIndex) {\r | |
75 | PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;\r | |
76 | \r | |
77 | for (PrpEntryIndex = 0; PrpEntryIndex < PrpEntryNo; ++PrpEntryIndex) {\r | |
78 | PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));\r | |
79 | if (PrpEntryIndex != PrpEntryNo - 1) {\r | |
80 | //\r | |
81 | // Fill all PRP entries except of last one.\r | |
82 | //\r | |
83 | CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));\r | |
84 | PhysicalAddr += EFI_PAGE_SIZE;\r | |
85 | } else {\r | |
86 | //\r | |
87 | // Fill last PRP entries with next PRP List pointer.\r | |
88 | //\r | |
89 | NewPhyAddr = (PrpListPhyAddr + (PrpListIndex + 1) * EFI_PAGE_SIZE);\r | |
90 | CopyMem (PrpEntry, (VOID *)(UINTN) (&NewPhyAddr), sizeof (UINT64));\r | |
91 | }\r | |
92 | }\r | |
93 | }\r | |
94 | \r | |
95 | //\r | |
96 | // Fill last PRP list.\r | |
97 | //\r | |
98 | PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;\r | |
99 | for (PrpEntryIndex = 0; PrpEntryIndex < ((Remainder != 0) ? Remainder : PrpEntryNo); ++PrpEntryIndex) {\r | |
100 | PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));\r | |
101 | CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));\r | |
102 | \r | |
103 | PhysicalAddr += EFI_PAGE_SIZE;\r | |
104 | }\r | |
105 | \r | |
106 | return PrpListPhyAddr;\r | |
107 | }\r | |
108 | \r | |
109 | /**\r | |
110 | Check the execution status from a given completion queue entry.\r | |
111 | \r | |
112 | @param[in] Cq A pointer to the NVME_CQ item.\r | |
113 | \r | |
114 | **/\r | |
115 | EFI_STATUS\r | |
116 | NvmeCheckCqStatus (\r | |
117 | IN NVME_CQ *Cq\r | |
118 | )\r | |
119 | {\r | |
120 | if (Cq->Sct == 0x0 && Cq->Sc == 0x0) {\r | |
121 | return EFI_SUCCESS;\r | |
122 | }\r | |
123 | \r | |
124 | DEBUG ((DEBUG_INFO, "Dump NVMe Completion Entry Status from [0x%x]:\n", (UINTN)Cq));\r | |
125 | DEBUG ((\r | |
126 | DEBUG_INFO,\r | |
127 | " SQ Identifier : [0x%x], Phase Tag : [%d], Cmd Identifier : [0x%x]\n",\r | |
128 | Cq->Sqid,\r | |
129 | Cq->Pt,\r | |
130 | Cq->Cid\r | |
131 | ));\r | |
132 | DEBUG ((DEBUG_INFO, " Status Code Type : [0x%x], Status Code : [0x%x]\n", Cq->Sct, Cq->Sc));\r | |
133 | DEBUG ((DEBUG_INFO, " NVMe Cmd Execution Result - "));\r | |
134 | \r | |
135 | switch (Cq->Sct) {\r | |
136 | case 0x0:\r | |
137 | switch (Cq->Sc) {\r | |
138 | case 0x0:\r | |
139 | DEBUG ((DEBUG_INFO, "Successful Completion\n"));\r | |
140 | return EFI_SUCCESS;\r | |
141 | case 0x1:\r | |
142 | DEBUG ((DEBUG_INFO, "Invalid Command Opcode\n"));\r | |
143 | break;\r | |
144 | case 0x2:\r | |
145 | DEBUG ((DEBUG_INFO, "Invalid Field in Command\n"));\r | |
146 | break;\r | |
147 | case 0x3:\r | |
148 | DEBUG ((DEBUG_INFO, "Command ID Conflict\n"));\r | |
149 | break;\r | |
150 | case 0x4:\r | |
151 | DEBUG ((DEBUG_INFO, "Data Transfer Error\n"));\r | |
152 | break;\r | |
153 | case 0x5:\r | |
154 | DEBUG ((DEBUG_INFO, "Commands Aborted due to Power Loss Notification\n"));\r | |
155 | break;\r | |
156 | case 0x6:\r | |
157 | DEBUG ((DEBUG_INFO, "Internal Device Error\n"));\r | |
158 | break;\r | |
159 | case 0x7:\r | |
160 | DEBUG ((DEBUG_INFO, "Command Abort Requested\n"));\r | |
161 | break;\r | |
162 | case 0x8:\r | |
163 | DEBUG ((DEBUG_INFO, "Command Aborted due to SQ Deletion\n"));\r | |
164 | break;\r | |
165 | case 0x9:\r | |
166 | DEBUG ((DEBUG_INFO, "Command Aborted due to Failed Fused Command\n"));\r | |
167 | break;\r | |
168 | case 0xA:\r | |
169 | DEBUG ((DEBUG_INFO, "Command Aborted due to Missing Fused Command\n"));\r | |
170 | break;\r | |
171 | case 0xB:\r | |
172 | DEBUG ((DEBUG_INFO, "Invalid Namespace or Format\n"));\r | |
173 | break;\r | |
174 | case 0xC:\r | |
175 | DEBUG ((DEBUG_INFO, "Command Sequence Error\n"));\r | |
176 | break;\r | |
177 | case 0xD:\r | |
178 | DEBUG ((DEBUG_INFO, "Invalid SGL Last Segment Descriptor\n"));\r | |
179 | break;\r | |
180 | case 0xE:\r | |
181 | DEBUG ((DEBUG_INFO, "Invalid Number of SGL Descriptors\n"));\r | |
182 | break;\r | |
183 | case 0xF:\r | |
184 | DEBUG ((DEBUG_INFO, "Data SGL Length Invalid\n"));\r | |
185 | break;\r | |
186 | case 0x10:\r | |
187 | DEBUG ((DEBUG_INFO, "Metadata SGL Length Invalid\n"));\r | |
188 | break;\r | |
189 | case 0x11:\r | |
190 | DEBUG ((DEBUG_INFO, "SGL Descriptor Type Invalid\n"));\r | |
191 | break;\r | |
192 | case 0x80:\r | |
193 | DEBUG ((DEBUG_INFO, "LBA Out of Range\n"));\r | |
194 | break;\r | |
195 | case 0x81:\r | |
196 | DEBUG ((DEBUG_INFO, "Capacity Exceeded\n"));\r | |
197 | break;\r | |
198 | case 0x82:\r | |
199 | DEBUG ((DEBUG_INFO, "Namespace Not Ready\n"));\r | |
200 | break;\r | |
201 | case 0x83:\r | |
202 | DEBUG ((DEBUG_INFO, "Reservation Conflict\n"));\r | |
203 | break;\r | |
204 | }\r | |
205 | break;\r | |
206 | \r | |
207 | case 0x1:\r | |
208 | switch (Cq->Sc) {\r | |
209 | case 0x0:\r | |
210 | DEBUG ((DEBUG_INFO, "Completion Queue Invalid\n"));\r | |
211 | break;\r | |
212 | case 0x1:\r | |
213 | DEBUG ((DEBUG_INFO, "Invalid Queue Identifier\n"));\r | |
214 | break;\r | |
215 | case 0x2:\r | |
216 | DEBUG ((DEBUG_INFO, "Maximum Queue Size Exceeded\n"));\r | |
217 | break;\r | |
218 | case 0x3:\r | |
219 | DEBUG ((DEBUG_INFO, "Abort Command Limit Exceeded\n"));\r | |
220 | break;\r | |
221 | case 0x5:\r | |
222 | DEBUG ((DEBUG_INFO, "Asynchronous Event Request Limit Exceeded\n"));\r | |
223 | break;\r | |
224 | case 0x6:\r | |
225 | DEBUG ((DEBUG_INFO, "Invalid Firmware Slot\n"));\r | |
226 | break;\r | |
227 | case 0x7:\r | |
228 | DEBUG ((DEBUG_INFO, "Invalid Firmware Image\n"));\r | |
229 | break;\r | |
230 | case 0x8:\r | |
231 | DEBUG ((DEBUG_INFO, "Invalid Interrupt Vector\n"));\r | |
232 | break;\r | |
233 | case 0x9:\r | |
234 | DEBUG ((DEBUG_INFO, "Invalid Log Page\n"));\r | |
235 | break;\r | |
236 | case 0xA:\r | |
237 | DEBUG ((DEBUG_INFO, "Invalid Format\n"));\r | |
238 | break;\r | |
239 | case 0xB:\r | |
240 | DEBUG ((DEBUG_INFO, "Firmware Application Requires Conventional Reset\n"));\r | |
241 | break;\r | |
242 | case 0xC:\r | |
243 | DEBUG ((DEBUG_INFO, "Invalid Queue Deletion\n"));\r | |
244 | break;\r | |
245 | case 0xD:\r | |
246 | DEBUG ((DEBUG_INFO, "Feature Identifier Not Saveable\n"));\r | |
247 | break;\r | |
248 | case 0xE:\r | |
249 | DEBUG ((DEBUG_INFO, "Feature Not Changeable\n"));\r | |
250 | break;\r | |
251 | case 0xF:\r | |
252 | DEBUG ((DEBUG_INFO, "Feature Not Namespace Specific\n"));\r | |
253 | break;\r | |
254 | case 0x10:\r | |
255 | DEBUG ((DEBUG_INFO, "Firmware Application Requires NVM Subsystem Reset\n"));\r | |
256 | break;\r | |
257 | case 0x80:\r | |
258 | DEBUG ((DEBUG_INFO, "Conflicting Attributes\n"));\r | |
259 | break;\r | |
260 | case 0x81:\r | |
261 | DEBUG ((DEBUG_INFO, "Invalid Protection Information\n"));\r | |
262 | break;\r | |
263 | case 0x82:\r | |
264 | DEBUG ((DEBUG_INFO, "Attempted Write to Read Only Range\n"));\r | |
265 | break;\r | |
266 | }\r | |
267 | break;\r | |
268 | \r | |
269 | case 0x2:\r | |
270 | switch (Cq->Sc) {\r | |
271 | case 0x80:\r | |
272 | DEBUG ((DEBUG_INFO, "Write Fault\n"));\r | |
273 | break;\r | |
274 | case 0x81:\r | |
275 | DEBUG ((DEBUG_INFO, "Unrecovered Read Error\n"));\r | |
276 | break;\r | |
277 | case 0x82:\r | |
278 | DEBUG ((DEBUG_INFO, "End-to-end Guard Check Error\n"));\r | |
279 | break;\r | |
280 | case 0x83:\r | |
281 | DEBUG ((DEBUG_INFO, "End-to-end Application Tag Check Error\n"));\r | |
282 | break;\r | |
283 | case 0x84:\r | |
284 | DEBUG ((DEBUG_INFO, "End-to-end Reference Tag Check Error\n"));\r | |
285 | break;\r | |
286 | case 0x85:\r | |
287 | DEBUG ((DEBUG_INFO, "Compare Failure\n"));\r | |
288 | break;\r | |
289 | case 0x86:\r | |
290 | DEBUG ((DEBUG_INFO, "Access Denied\n"));\r | |
291 | break;\r | |
292 | }\r | |
293 | break;\r | |
294 | \r | |
295 | default:\r | |
296 | DEBUG ((DEBUG_INFO, "Unknown error\n"));\r | |
297 | break;\r | |
298 | }\r | |
299 | \r | |
300 | return EFI_DEVICE_ERROR;\r | |
301 | }\r | |
302 | \r | |
/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function only
  supports blocking execution of the command.

  @param[in] Private         The pointer to the NVME_CONTEXT Data structure.
  @param[in] NamespaceId     Is a 32 bit Namespace ID to which the Express HCI command packet will
                             be sent.
                             A Value of 0 denotes the NVM Express controller, a Value of all 0FFh in
                             the namespace ID specifies that the command packet should be sent to all
                             valid namespaces.
  @param[in,out] Packet      A pointer to the EDKII PEI NVM Express PassThru Command Packet to send
                             to the NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS              The EDKII PEI NVM Express Command Packet was sent by the host.
                                   TransferLength bytes were transferred to, or from DataBuffer.
  @retval EFI_NOT_READY            The EDKII PEI NVM Express Command Packet could not be sent because
                                   the controller is not ready. The caller may retry again later.
  @retval EFI_DEVICE_ERROR         A device error occurred while attempting to send the EDKII PEI NVM
                                   Express Command Packet.
  @retval EFI_INVALID_PARAMETER    Namespace, or the contents of EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET
                                   are invalid.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_UNSUPPORTED          The command described by the EDKII PEI NVM Express Command Packet
                                   is not supported by the host adapter.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_TIMEOUT              A timeout occurred while waiting for the EDKII PEI NVM Express Command
                                   Packet to execute.

**/
EFI_STATUS
NvmePassThru (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA                *Private,
  IN     UINT32                                          NamespaceId,
  IN OUT EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  *Packet
  )
{
  EFI_STATUS               Status;
  NVME_SQ                  *Sq;
  NVME_CQ                  *Cq;
  UINT8                    QueueId;
  UINTN                    SqSize;
  UINTN                    CqSize;
  EDKII_IOMMU_OPERATION    MapOp;
  UINTN                    MapLength;
  EFI_PHYSICAL_ADDRESS     PhyAddr;
  VOID                     *MapData;
  VOID                     *MapMeta;
  UINT32                   Bytes;
  UINT32                   Offset;
  UINT32                   Data32;
  UINT64                   Timer;

  //
  // Check the data fields in Packet parameter
  //
  if (Packet == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: Packet(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->NvmeCmd,
      (UINTN)Packet->NvmeCompletion
      ));
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: QueueId(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->QueueType
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Locate the next free submission queue entry and the expected completion
  // queue entry from the driver-maintained doorbell shadow values.
  //
  QueueId = Packet->QueueType;
  Sq  = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq  = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  //
  // The NVME_A*Q_SIZE/NVME_C*Q_SIZE macros hold the 0-based (register-encoded)
  // sizes, hence the +1 to get the actual entry counts used for wrap-around.
  //
  if (QueueId == NVME_ADMIN_QUEUE) {
    SqSize = NVME_ASQ_SIZE + 1;
    CqSize = NVME_ACQ_SIZE + 1;
  } else {
    SqSize = NVME_CSQ_SIZE + 1;
    CqSize = NVME_CCQ_SIZE + 1;
  }

  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: Nsid mismatch (%x, %x)\n",
      __FUNCTION__,
      Packet->NvmeCmd->Nsid,
      NamespaceId
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Build the submission queue entry from the caller's command.
  //
  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Packet->NvmeCmd->Cdw0.Cid;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((DEBUG_ERROR, "%a: Does not support SGL mechanism.\n", __FUNCTION__));
    return EFI_UNSUPPORTED;
  }

  //
  // Default PRP1 to the host address of the transfer buffer; it is replaced
  // below with the IOMMU-mapped device address when a mapping is performed.
  //
  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  Sq->Prp[1] = 0;
  MapData    = NULL;
  MapMeta    = NULL;
  Status     = EFI_SUCCESS;
  //
  // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller
  // specific addresses.
  // BIT0/BIT1 of the opcode encode the data transfer direction.
  //
  if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    //
    // TransferLength and TransferBuffer must be consistent: both zero/NULL or
    // both present.
    //
    if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
        ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // Currently, we only support creating IO submission/completion queues that are
    // allocated internally by the driver.
    //
    if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
        ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
      if ((Packet->TransferBuffer != Private->SqBuffer[NVME_IO_QUEUE]) &&
          (Packet->TransferBuffer != Private->CqBuffer[NVME_IO_QUEUE])) {
        DEBUG ((
          DEBUG_ERROR,
          "%a: Does not support external IO queues creation request.\n",
          __FUNCTION__
          ));
        return EFI_UNSUPPORTED;
      }
    } else {
      //
      // BIT0 set means host-to-device transfer (controller reads host memory);
      // otherwise the controller writes to host memory.
      //
      if ((Sq->Opc & BIT0) != 0) {
        MapOp = EdkiiIoMmuOperationBusMasterRead;
      } else {
        MapOp = EdkiiIoMmuOperationBusMasterWrite;
      }

      if ((Packet->TransferLength != 0) && (Packet->TransferBuffer != NULL)) {
        MapLength = Packet->TransferLength;
        Status = IoMmuMap (
                   MapOp,
                   Packet->TransferBuffer,
                   &MapLength,
                   &PhyAddr,
                   &MapData
                   );
        //
        // A partial mapping (MapLength shrunk) is treated as a failure since
        // the command needs the whole buffer mapped contiguously for PRPs.
        //
        if (EFI_ERROR (Status) || (MapLength != Packet->TransferLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map data buffer.\n", __FUNCTION__));
          goto Exit;
        }

        Sq->Prp[0] = PhyAddr;
      }

      if((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
        MapLength = Packet->MetadataLength;
        Status = IoMmuMap (
                   MapOp,
                   Packet->MetadataBuffer,
                   &MapLength,
                   &PhyAddr,
                   &MapMeta
                   );
        if (EFI_ERROR (Status) || (MapLength != Packet->MetadataLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map meta data buffer.\n", __FUNCTION__));
          goto Exit;
        }
        Sq->Mptr = PhyAddr;
      }
    }
  }

  //
  // If the Buffer Size spans more than two memory pages (page Size as defined in CC.Mps),
  // then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT32)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining Data Buffer.
    // PhyAddr is the start of the second page of the transfer, page-aligned.
    //
    PhyAddr = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Sq->Prp[1] = NvmeCreatePrpList (
                   Private,
                   PhyAddr,
                   EFI_SIZE_TO_PAGES(Offset + Bytes) - 1
                   );
    if (Sq->Prp[1] == 0) {
      Status = EFI_OUT_OF_RESOURCES;
      DEBUG ((DEBUG_ERROR, "%a: Create PRP list fail, Status - %r\n", __FUNCTION__, Status));
      goto Exit;
    }

  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    //
    // Exactly two pages are touched: PRP2 points directly at the second page.
    //
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  //
  // Copy only the command dwords the caller has flagged as valid.
  //
  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }
  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }
  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }
  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }
  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }
  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  // The tail index wraps to 0 at SqSize.
  //
  Private->SqTdbl[QueueId].Sqt++;
  if (Private->SqTdbl[QueueId].Sqt == SqSize) {
    Private->SqTdbl[QueueId].Sqt = 0;
  }
  Data32 = ReadUnaligned32 ((UINT32 *)&Private->SqTdbl[QueueId]);
  Status = NVME_SET_SQTDBL (Private, QueueId, &Data32);
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __FUNCTION__, Status));
    goto Exit;
  }

  //
  // Wait for completion queue to get filled in.
  // The controller flips the entry's phase tag when it posts a completion;
  // poll until the tag differs from the driver's expected phase.
  //
  Status = EFI_TIMEOUT;
  Timer  = 0;
  while (Timer < Packet->CommandTimeout) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }

    MicroSecondDelay (NVME_POLL_INTERVAL);
    Timer += NVME_POLL_INTERVAL;
  }

  if (Status == EFI_TIMEOUT) {
    //
    // Timeout occurs for an NVMe command, reset the controller to abort the outstanding command
    //
    DEBUG ((DEBUG_ERROR, "%a: Timeout occurs for the PassThru command.\n", __FUNCTION__));
    Status = NvmeControllerInit (Private);
    if (EFI_ERROR (Status)) {
      Status = EFI_DEVICE_ERROR;
    } else {
      //
      // Return EFI_TIMEOUT to indicate a timeout occurs for PassThru command
      //
      Status = EFI_TIMEOUT;
    }
    goto Exit;
  }

  //
  // Move forward the Completion Queue head; toggle the expected phase tag
  // each time the head wraps back to entry 0.
  //
  Private->CqHdbl[QueueId].Cqh++;
  if (Private->CqHdbl[QueueId].Cqh == CqSize) {
    Private->CqHdbl[QueueId].Cqh = 0;
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Copy the Respose Queue entry for this command to the callers response buffer
  //
  CopyMem (Packet->NvmeCompletion, Cq, sizeof (EDKII_PEI_NVM_EXPRESS_COMPLETION));

  //
  // Check the NVMe cmd execution result
  //
  Status = NvmeCheckCqStatus (Cq);
  NVME_SET_CQHDBL (Private, QueueId, &Private->CqHdbl[QueueId]);

Exit:
  //
  // Unmap any IOMMU mappings made above; IoMmuUnmap is only called for
  // mappings that actually succeeded (non-NULL tokens).
  //
  if (MapMeta != NULL) {
    IoMmuUnmap (MapMeta);
  }

  if (MapData != NULL) {
    IoMmuUnmap (MapData);
  }

  return Status;
}