/** @file
  The NvmExpressPei driver is used to manage non-volatile memory subsystem
  which follows NVM Express specification at PEI phase.

  Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>

  This program and the accompanying materials
  are licensed and made available under the terms and conditions
  of the BSD License which accompanies this distribution. The
  full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "NvmExpressPei.h"

/**
  Create PRP lists for data transfer which is larger than 2 memory pages.

  @param[in] Private         The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
  @param[in] PhysicalAddr    The physical base address of the data buffer.
  @param[in] Pages           The number of pages to be transferred.

  @retval The pointer to the first PRP List of the PRP lists.

**/
UINT64
NvmeCreatePrpList (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA    *Private,
  IN     EFI_PHYSICAL_ADDRESS                PhysicalAddr,
  IN     UINTN                               Pages
  )
{
  UINTN                   PrpEntryNo;
  UINTN                   PrpListNo;
  UINT64                  PrpListBase;
  VOID                    *PrpListHost;
  UINTN                   PrpListIndex;
  UINTN                   PrpEntryIndex;
  UINT64                  Remainder;
  EFI_PHYSICAL_ADDRESS    PrpListPhyAddr;
  UINTN                   Bytes;
  UINT8                   *PrpEntry;
  EFI_PHYSICAL_ADDRESS    NewPhyAddr;

  //
  // The number of PRP entries in a memory page.
  //
  PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);

  //
  // Calculate the total number of PRP lists.
  //
  PrpListNo = (UINTN) DivU64x64Remainder ((UINT64)Pages, (UINT64)PrpEntryNo, &Remainder);
  if (Remainder != 0) {
    PrpListNo += 1;
  }
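  //
  // Note: a 4 KiB PRP list page holds EFI_PAGE_SIZE / sizeof (UINT64) = 512
  // entries, and in every list except the last the final entry is reused to
  // chain to the next PRP list page (see the fill loops below).
  //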

  if (PrpListNo > NVME_PRP_SIZE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: The implementation only supports PrpList number up to 4."
      " But %d are needed here.\n",
      __FUNCTION__,
      PrpListNo
      ));
    return 0;
  }
  PrpListHost = (VOID *)(UINTN) NVME_PRP_BASE (Private);

  Bytes          = EFI_PAGES_TO_SIZE (PrpListNo);
  PrpListPhyAddr = (UINT64)(UINTN)(PrpListHost);

  //
  // Fill all PRP lists except the last one.
  //
  ZeroMem (PrpListHost, Bytes);
  for (PrpListIndex = 0; PrpListIndex < PrpListNo - 1; ++PrpListIndex) {
    PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;

    for (PrpEntryIndex = 0; PrpEntryIndex < PrpEntryNo; ++PrpEntryIndex) {
      PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof (UINT64));
      if (PrpEntryIndex != PrpEntryNo - 1) {
        //
        // Fill all PRP entries except the last one.
        //
        CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));
        PhysicalAddr += EFI_PAGE_SIZE;
      } else {
        //
        // Fill the last PRP entry with a pointer to the next PRP list.
        //
        NewPhyAddr = (PrpListPhyAddr + (PrpListIndex + 1) * EFI_PAGE_SIZE);
        CopyMem (PrpEntry, (VOID *)(UINTN) (&NewPhyAddr), sizeof (UINT64));
      }
    }
  }

  //
  // Fill the last PRP list.
  //
  PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;
  for (PrpEntryIndex = 0; PrpEntryIndex < ((Remainder != 0) ? Remainder : PrpEntryNo); ++PrpEntryIndex) {
    PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof (UINT64));
    CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));

    PhysicalAddr += EFI_PAGE_SIZE;
  }

  return PrpListPhyAddr;
}

/**
  Check the execution status from a given completion queue entry.

  @param[in] Cq    A pointer to the NVME_CQ item.

  @retval EFI_SUCCESS         The related command executed successfully.
  @retval EFI_DEVICE_ERROR    The related command failed; details are printed to the debug log.

**/
EFI_STATUS
NvmeCheckCqStatus (
  IN NVME_CQ    *Cq
  )
{
  if (Cq->Sct == 0x0 && Cq->Sc == 0x0) {
    return EFI_SUCCESS;
  }

  DEBUG ((DEBUG_INFO, "Dump NVMe Completion Entry Status from [0x%x]:\n", (UINTN)Cq));
  DEBUG ((
    DEBUG_INFO,
    " SQ Identifier : [0x%x], Phase Tag : [%d], Cmd Identifier : [0x%x]\n",
    Cq->Sqid,
    Cq->Pt,
    Cq->Cid
    ));
  DEBUG ((DEBUG_INFO, " Status Code Type : [0x%x], Status Code : [0x%x]\n", Cq->Sct, Cq->Sc));
  DEBUG ((DEBUG_INFO, " NVMe Cmd Execution Result - "));

  switch (Cq->Sct) {
    case 0x0:
      switch (Cq->Sc) {
        case 0x0:
          DEBUG ((DEBUG_INFO, "Successful Completion\n"));
          return EFI_SUCCESS;
        case 0x1:
          DEBUG ((DEBUG_INFO, "Invalid Command Opcode\n"));
          break;
        case 0x2:
          DEBUG ((DEBUG_INFO, "Invalid Field in Command\n"));
          break;
        case 0x3:
          DEBUG ((DEBUG_INFO, "Command ID Conflict\n"));
          break;
        case 0x4:
          DEBUG ((DEBUG_INFO, "Data Transfer Error\n"));
          break;
        case 0x5:
          DEBUG ((DEBUG_INFO, "Commands Aborted due to Power Loss Notification\n"));
          break;
        case 0x6:
          DEBUG ((DEBUG_INFO, "Internal Device Error\n"));
          break;
        case 0x7:
          DEBUG ((DEBUG_INFO, "Command Abort Requested\n"));
          break;
        case 0x8:
          DEBUG ((DEBUG_INFO, "Command Aborted due to SQ Deletion\n"));
          break;
        case 0x9:
          DEBUG ((DEBUG_INFO, "Command Aborted due to Failed Fused Command\n"));
          break;
        case 0xA:
          DEBUG ((DEBUG_INFO, "Command Aborted due to Missing Fused Command\n"));
          break;
        case 0xB:
          DEBUG ((DEBUG_INFO, "Invalid Namespace or Format\n"));
          break;
        case 0xC:
          DEBUG ((DEBUG_INFO, "Command Sequence Error\n"));
          break;
        case 0xD:
          DEBUG ((DEBUG_INFO, "Invalid SGL Last Segment Descriptor\n"));
          break;
        case 0xE:
          DEBUG ((DEBUG_INFO, "Invalid Number of SGL Descriptors\n"));
          break;
        case 0xF:
          DEBUG ((DEBUG_INFO, "Data SGL Length Invalid\n"));
          break;
        case 0x10:
          DEBUG ((DEBUG_INFO, "Metadata SGL Length Invalid\n"));
          break;
        case 0x11:
          DEBUG ((DEBUG_INFO, "SGL Descriptor Type Invalid\n"));
          break;
        case 0x80:
          DEBUG ((DEBUG_INFO, "LBA Out of Range\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Capacity Exceeded\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "Namespace Not Ready\n"));
          break;
        case 0x83:
          DEBUG ((DEBUG_INFO, "Reservation Conflict\n"));
          break;
      }
      break;

    case 0x1:
      switch (Cq->Sc) {
        case 0x0:
          DEBUG ((DEBUG_INFO, "Completion Queue Invalid\n"));
          break;
        case 0x1:
          DEBUG ((DEBUG_INFO, "Invalid Queue Identifier\n"));
          break;
        case 0x2:
          DEBUG ((DEBUG_INFO, "Maximum Queue Size Exceeded\n"));
          break;
        case 0x3:
          DEBUG ((DEBUG_INFO, "Abort Command Limit Exceeded\n"));
          break;
        case 0x5:
          DEBUG ((DEBUG_INFO, "Asynchronous Event Request Limit Exceeded\n"));
          break;
        case 0x6:
          DEBUG ((DEBUG_INFO, "Invalid Firmware Slot\n"));
          break;
        case 0x7:
          DEBUG ((DEBUG_INFO, "Invalid Firmware Image\n"));
          break;
        case 0x8:
          DEBUG ((DEBUG_INFO, "Invalid Interrupt Vector\n"));
          break;
        case 0x9:
          DEBUG ((DEBUG_INFO, "Invalid Log Page\n"));
          break;
        case 0xA:
          DEBUG ((DEBUG_INFO, "Invalid Format\n"));
          break;
        case 0xB:
          DEBUG ((DEBUG_INFO, "Firmware Application Requires Conventional Reset\n"));
          break;
        case 0xC:
          DEBUG ((DEBUG_INFO, "Invalid Queue Deletion\n"));
          break;
        case 0xD:
          DEBUG ((DEBUG_INFO, "Feature Identifier Not Saveable\n"));
          break;
        case 0xE:
          DEBUG ((DEBUG_INFO, "Feature Not Changeable\n"));
          break;
        case 0xF:
          DEBUG ((DEBUG_INFO, "Feature Not Namespace Specific\n"));
          break;
        case 0x10:
          DEBUG ((DEBUG_INFO, "Firmware Application Requires NVM Subsystem Reset\n"));
          break;
        case 0x80:
          DEBUG ((DEBUG_INFO, "Conflicting Attributes\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Invalid Protection Information\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "Attempted Write to Read Only Range\n"));
          break;
      }
      break;

    case 0x2:
      switch (Cq->Sc) {
        case 0x80:
          DEBUG ((DEBUG_INFO, "Write Fault\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Unrecovered Read Error\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "End-to-end Guard Check Error\n"));
          break;
        case 0x83:
          DEBUG ((DEBUG_INFO, "End-to-end Application Tag Check Error\n"));
          break;
        case 0x84:
          DEBUG ((DEBUG_INFO, "End-to-end Reference Tag Check Error\n"));
          break;
        case 0x85:
          DEBUG ((DEBUG_INFO, "Compare Failure\n"));
          break;
        case 0x86:
          DEBUG ((DEBUG_INFO, "Access Denied\n"));
          break;
      }
      break;

    default:
      DEBUG ((DEBUG_INFO, "Unknown error\n"));
      break;
  }

  return EFI_DEVICE_ERROR;
}

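//
// Illustrative usage sketch for NvmePassThru() below. This is a hedged example,
// not code from this driver: the EDKII_PEI_NVM_EXPRESS_COMMAND type name, the
// NVME_ADMIN_IDENTIFY_CMD and NVME_GENERIC_TIMEOUT macros, and the Buffer/
// Private/Status locals are assumptions made for the sake of the example.
//
//   EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET  Packet;
//   EDKII_PEI_NVM_EXPRESS_COMMAND                   Cmd;
//   EDKII_PEI_NVM_EXPRESS_COMPLETION                Completion;
//
//   ZeroMem (&Packet, sizeof (Packet));
//   ZeroMem (&Cmd, sizeof (Cmd));
//   ZeroMem (&Completion, sizeof (Completion));
//   Cmd.Cdw0.Opcode       = NVME_ADMIN_IDENTIFY_CMD;  // Assumed Identify opcode macro
//   Cmd.Nsid              = 0;                        // 0 targets the controller itself
//   Cmd.Cdw10             = 1;                        // CNS = 1: Identify Controller data
//   Cmd.Flags             = CDW10_VALID;
//   Packet.NvmeCmd        = &Cmd;
//   Packet.NvmeCompletion = &Completion;
//   Packet.TransferBuffer = Buffer;                   // Page-aligned 4 KiB buffer
//   Packet.TransferLength = 0x1000;
//   Packet.QueueType      = NVME_ADMIN_QUEUE;
//   Packet.CommandTimeout = NVME_GENERIC_TIMEOUT;     // Assumed driver-default timeout
//   Status = NvmePassThru (Private, 0, &Packet);
//
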
/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function only
  supports blocking execution of the command.

  @param[in]     Private        The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
  @param[in]     NamespaceId    The 32-bit namespace ID to which the NVM Express command packet will
                                be sent.
                                A value of 0 denotes the NVM Express controller, a value of all 0FFh
                                in the namespace ID specifies that the command packet should be sent
                                to all valid namespaces.
  @param[in,out] Packet         A pointer to the EDKII PEI NVM Express PassThru Command Packet to send
                                to the NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS              The EDKII PEI NVM Express Command Packet was sent by the host.
                                   TransferLength bytes were transferred to or from DataBuffer.
  @retval EFI_NOT_READY            The EDKII PEI NVM Express Command Packet could not be sent because
                                   the controller is not ready. The caller may retry later.
  @retval EFI_DEVICE_ERROR         A device error occurred while attempting to send the EDKII PEI NVM
                                   Express Command Packet.
  @retval EFI_INVALID_PARAMETER    NamespaceId or the contents of the EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET
                                   are invalid.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_UNSUPPORTED          The command described by the EDKII PEI NVM Express Command Packet
                                   is not supported by the host adapter.
                                   The EDKII PEI NVM Express Command Packet was not sent, so no
                                   additional status information is available.
  @retval EFI_TIMEOUT              A timeout occurred while waiting for the EDKII PEI NVM Express Command
                                   Packet to execute.

**/
EFI_STATUS
NvmePassThru (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA                   *Private,
  IN     UINT32                                             NamespaceId,
  IN OUT EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET     *Packet
  )
{
  EFI_STATUS               Status;
  NVME_SQ                  *Sq;
  NVME_CQ                  *Cq;
  UINT8                    QueueId;
  UINTN                    SqSize;
  UINTN                    CqSize;
  EDKII_IOMMU_OPERATION    MapOp;
  UINTN                    MapLength;
  EFI_PHYSICAL_ADDRESS     PhyAddr;
  VOID                     *MapData;
  VOID                     *MapMeta;
  UINT32                   Bytes;
  UINT32                   Offset;
  UINT32                   Data32;
  UINT64                   Timer;

  //
  // Check the data fields in Packet parameter
  //
  if (Packet == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: Packet(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->NvmeCmd,
      (UINTN)Packet->NvmeCompletion
      ));
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: QueueId(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->QueueType
      ));
    return EFI_INVALID_PARAMETER;
  }

  QueueId = Packet->QueueType;
  Sq      = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq      = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  if (QueueId == NVME_ADMIN_QUEUE) {
    SqSize = NVME_ASQ_SIZE + 1;
    CqSize = NVME_ACQ_SIZE + 1;
  } else {
    SqSize = NVME_CSQ_SIZE + 1;
    CqSize = NVME_CCQ_SIZE + 1;
  }
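  //
  // Note: the NVME_xSQ_SIZE/NVME_xCQ_SIZE macros above are assumed to hold the
  // zero-based queue sizes (the form programmed into the controller), so one is
  // added to obtain the number of entries used for the wrap checks below.
  //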

  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: Nsid mismatch (%x, %x)\n",
      __FUNCTION__,
      Packet->NvmeCmd->Nsid,
      NamespaceId
      ));
    return EFI_INVALID_PARAMETER;
  }

  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Packet->NvmeCmd->Cdw0.Cid;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((DEBUG_ERROR, "%a: Does not support SGL mechanism.\n", __FUNCTION__));
    return EFI_UNSUPPORTED;
  }
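  //
  // In the NVMe specification, the PSDT field of CDW0 selects the data transfer
  // mechanism: 00b means PRPs are used, while non-zero values select SGLs.
  //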

  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  Sq->Prp[1] = 0;
  MapData    = NULL;
  MapMeta    = NULL;
  Status     = EFI_SUCCESS;
  //
  // If the NVMe command has data in or out, map the user buffer to the
  // PCI-controller-specific addresses.
  //
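  // Per the NVMe specification, bits [1:0] of the opcode encode the data
  // transfer direction (01b = host-to-controller, 10b = controller-to-host),
  // which is why they also select the IOMMU mapping operation below.
  //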
  if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    if ((Packet->TransferLength == 0) || (Packet->TransferBuffer == NULL)) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // Currently, we only support creating IO submission/completion queues that are
    // allocated internally by the driver.
    //
    if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
        ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
      if ((Packet->TransferBuffer != Private->SqBuffer[NVME_IO_QUEUE]) &&
          (Packet->TransferBuffer != Private->CqBuffer[NVME_IO_QUEUE])) {
        DEBUG ((
          DEBUG_ERROR,
          "%a: Does not support external IO queues creation request.\n",
          __FUNCTION__
          ));
        return EFI_UNSUPPORTED;
      }
    } else {
      if ((Sq->Opc & BIT0) != 0) {
        MapOp = EdkiiIoMmuOperationBusMasterRead;
      } else {
        MapOp = EdkiiIoMmuOperationBusMasterWrite;
      }

      MapLength = Packet->TransferLength;
      Status = IoMmuMap (
                 MapOp,
                 Packet->TransferBuffer,
                 &MapLength,
                 &PhyAddr,
                 &MapData
                 );
      if (EFI_ERROR (Status) || (MapLength != Packet->TransferLength)) {
        Status = EFI_OUT_OF_RESOURCES;
        DEBUG ((DEBUG_ERROR, "%a: Fail to map data buffer.\n", __FUNCTION__));
        goto Exit;
      }

      Sq->Prp[0] = PhyAddr;

      if ((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
        MapLength = Packet->MetadataLength;
        Status = IoMmuMap (
                   MapOp,
                   Packet->MetadataBuffer,
                   &MapLength,
                   &PhyAddr,
                   &MapMeta
                   );
        if (EFI_ERROR (Status) || (MapLength != Packet->MetadataLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map meta data buffer.\n", __FUNCTION__));
          goto Exit;
        }
        Sq->Mptr = PhyAddr;
      }
    }
  }

  //
  // If the buffer size spans more than two memory pages (page size as defined in CC.Mps),
  // then build a PRP list and place its address in the second PRP entry of the submission
  // queue entry.
  //
  Offset = ((UINT32)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create a PRP list for the remaining data buffer; the first page of the
    // transfer is already covered by Prp[0], hence one page is subtracted.
    //
    PhyAddr = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Sq->Prp[1] = NvmeCreatePrpList (
                   Private,
                   PhyAddr,
                   EFI_SIZE_TO_PAGES (Offset + Bytes) - 1
                   );
    if (Sq->Prp[1] == 0) {
      Status = EFI_OUT_OF_RESOURCES;
      DEBUG ((DEBUG_ERROR, "%a: Create PRP list fail, Status - %r\n", __FUNCTION__, Status));
      goto Exit;
    }

  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
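    //
    // The transfer crosses a single page boundary: point Prp[1] directly at
    // the second page; no PRP list is needed.
    //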
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }
  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }
  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }
  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }
  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }
  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  //
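  // Advance the tail (wrapping at the queue size) and write the new 32-bit
  // doorbell value to the controller's SQ Tail Doorbell register so that it
  // knows a new submission queue entry is available.
  //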
  Private->SqTdbl[QueueId].Sqt++;
  if (Private->SqTdbl[QueueId].Sqt == SqSize) {
    Private->SqTdbl[QueueId].Sqt = 0;
  }
  Data32 = ReadUnaligned32 ((UINT32 *)&Private->SqTdbl[QueueId]);
  Status = NVME_SET_SQTDBL (Private, QueueId, &Data32);
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __FUNCTION__, Status));
    goto Exit;
  }

  //
  // Wait for completion queue to get filled in.
  //
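  // The controller inverts the Phase Tag bit each time it posts a new entry to
  // the completion queue, so a Phase Tag that differs from the driver's
  // recorded value indicates a newly posted completion.
  //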
  Status = EFI_TIMEOUT;
  Timer  = 0;
  while (Timer < Packet->CommandTimeout) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }

    MicroSecondDelay (NVME_POLL_INTERVAL);
    Timer += NVME_POLL_INTERVAL;
  }

  if (Status == EFI_TIMEOUT) {
    //
    // A timeout occurred for the NVMe command; reset the controller to abort the
    // outstanding command.
    //
    DEBUG ((DEBUG_ERROR, "%a: Timeout occurred for the PassThru command.\n", __FUNCTION__));
    Status = NvmeControllerInit (Private);
    if (EFI_ERROR (Status)) {
      Status = EFI_DEVICE_ERROR;
    } else {
      //
      // Return EFI_TIMEOUT to indicate that a timeout occurred for the PassThru command.
      //
      Status = EFI_TIMEOUT;
    }
    goto Exit;
  }

  //
  // Move the Completion Queue head forward.
  //
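  // (When the head wraps back to the start of the queue, the expected Phase
  // Tag is inverted to match the controller's next pass over the queue.)
  //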
  Private->CqHdbl[QueueId].Cqh++;
  if (Private->CqHdbl[QueueId].Cqh == CqSize) {
    Private->CqHdbl[QueueId].Cqh = 0;
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Copy the completion queue entry for this command to the caller's response buffer.
  //
  CopyMem (Packet->NvmeCompletion, Cq, sizeof (EDKII_PEI_NVM_EXPRESS_COMPLETION));

  //
  // Check the NVMe cmd execution result
  //
  Status = NvmeCheckCqStatus (Cq);
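  //
  // Writing the updated head value to the CQ Head Doorbell register tells the
  // controller that this completion entry has been consumed.
  //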
  NVME_SET_CQHDBL (Private, QueueId, &Private->CqHdbl[QueueId]);

Exit:
  if (MapMeta != NULL) {
    IoMmuUnmap (MapMeta);
  }

  if (MapData != NULL) {
    IoMmuUnmap (MapData);
  }

  return Status;
}