]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Bus/Pci/NvmExpressPei/NvmExpressPeiPassThru.c
a33f5a872e1f69082e9a7f5733534d4a05f81ab3
[mirror_edk2.git] / MdeModulePkg / Bus / Pci / NvmExpressPei / NvmExpressPeiPassThru.c
1 /** @file
2 The NvmExpressPei driver is used to manage non-volatile memory subsystem
3 which follows NVM Express specification at PEI phase.
4
5 Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "NvmExpressPei.h"
12
13 /**
14 Create PRP lists for Data transfer which is larger than 2 memory pages.
15
16 @param[in] Private The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
17 @param[in] PhysicalAddr The physical base address of Data Buffer.
18 @param[in] Pages The number of pages to be transfered.
19
20 @retval The pointer Value to the first PRP List of the PRP lists.
21
22 **/
23 UINT64
24 NvmeCreatePrpList (
25 IN PEI_NVME_CONTROLLER_PRIVATE_DATA *Private,
26 IN EFI_PHYSICAL_ADDRESS PhysicalAddr,
27 IN UINTN Pages
28 )
29 {
30 UINTN PrpEntryNo;
31 UINTN PrpListNo;
32 UINT64 PrpListBase;
33 VOID *PrpListHost;
34 UINTN PrpListIndex;
35 UINTN PrpEntryIndex;
36 UINT64 Remainder;
37 EFI_PHYSICAL_ADDRESS PrpListPhyAddr;
38 UINTN Bytes;
39 UINT8 *PrpEntry;
40 EFI_PHYSICAL_ADDRESS NewPhyAddr;
41
42 //
43 // The number of Prp Entry in a memory page.
44 //
45 PrpEntryNo = EFI_PAGE_SIZE / sizeof (UINT64);
46
47 //
48 // Calculate total PrpList number.
49 //
50 PrpListNo = (UINTN) DivU64x64Remainder ((UINT64)Pages, (UINT64)PrpEntryNo, &Remainder);
51 if (Remainder != 0) {
52 PrpListNo += 1;
53 }
54
55 if (PrpListNo > NVME_PRP_SIZE) {
56 DEBUG ((
57 DEBUG_ERROR,
58 "%a: The implementation only supports PrpList number up to 4."
59 " But %d are needed here.\n",
60 __FUNCTION__,
61 PrpListNo
62 ));
63 return 0;
64 }
65 PrpListHost = (VOID *)(UINTN) NVME_PRP_BASE (Private);
66
67 Bytes = EFI_PAGES_TO_SIZE (PrpListNo);
68 PrpListPhyAddr = (UINT64)(UINTN)(PrpListHost);
69
70 //
71 // Fill all PRP lists except of last one.
72 //
73 ZeroMem (PrpListHost, Bytes);
74 for (PrpListIndex = 0; PrpListIndex < PrpListNo - 1; ++PrpListIndex) {
75 PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;
76
77 for (PrpEntryIndex = 0; PrpEntryIndex < PrpEntryNo; ++PrpEntryIndex) {
78 PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));
79 if (PrpEntryIndex != PrpEntryNo - 1) {
80 //
81 // Fill all PRP entries except of last one.
82 //
83 CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));
84 PhysicalAddr += EFI_PAGE_SIZE;
85 } else {
86 //
87 // Fill last PRP entries with next PRP List pointer.
88 //
89 NewPhyAddr = (PrpListPhyAddr + (PrpListIndex + 1) * EFI_PAGE_SIZE);
90 CopyMem (PrpEntry, (VOID *)(UINTN) (&NewPhyAddr), sizeof (UINT64));
91 }
92 }
93 }
94
95 //
96 // Fill last PRP list.
97 //
98 PrpListBase = (UINTN)PrpListHost + PrpListIndex * EFI_PAGE_SIZE;
99 for (PrpEntryIndex = 0; PrpEntryIndex < ((Remainder != 0) ? Remainder : PrpEntryNo); ++PrpEntryIndex) {
100 PrpEntry = (UINT8 *)(UINTN) (PrpListBase + PrpEntryIndex * sizeof(UINT64));
101 CopyMem (PrpEntry, (VOID *)(UINTN) (&PhysicalAddr), sizeof (UINT64));
102
103 PhysicalAddr += EFI_PAGE_SIZE;
104 }
105
106 return PrpListPhyAddr;
107 }
108
/**
  Check the execution status from a given completion queue entry.

  Decodes the Status Code Type (SCT) and Status Code (SC) fields of the
  completion entry and prints a human-readable description of the result to
  the debug output.

  @param[in] Cq    A pointer to the NVME_CQ item.

  @retval EFI_SUCCESS       Both SCT and SC are zero; the command succeeded.
  @retval EFI_DEVICE_ERROR  Any other status; the decoded reason is logged.

**/
EFI_STATUS
NvmeCheckCqStatus (
  IN NVME_CQ    *Cq
  )
{
  //
  // Fast path: a zero SCT/SC pair indicates successful completion with no
  // need to dump the entry.
  //
  if (Cq->Sct == 0x0 && Cq->Sc == 0x0) {
    return EFI_SUCCESS;
  }

  DEBUG ((DEBUG_INFO, "Dump NVMe Completion Entry Status from [0x%x]:\n", (UINTN)Cq));
  DEBUG ((
    DEBUG_INFO,
    "  SQ Identifier : [0x%x], Phase Tag : [%d], Cmd Identifier : [0x%x]\n",
    Cq->Sqid,
    Cq->Pt,
    Cq->Cid
    ));
  DEBUG ((DEBUG_INFO, "  Status Code Type : [0x%x], Status Code : [0x%x]\n", Cq->Sct, Cq->Sc));
  DEBUG ((DEBUG_INFO, "  NVMe Cmd Execution Result - "));

  switch (Cq->Sct) {
    //
    // SCT 0: generic command status codes.
    //
    case 0x0:
      switch (Cq->Sc) {
        case 0x0:
          DEBUG ((DEBUG_INFO, "Successful Completion\n"));
          return EFI_SUCCESS;
        case 0x1:
          DEBUG ((DEBUG_INFO, "Invalid Command Opcode\n"));
          break;
        case 0x2:
          DEBUG ((DEBUG_INFO, "Invalid Field in Command\n"));
          break;
        case 0x3:
          DEBUG ((DEBUG_INFO, "Command ID Conflict\n"));
          break;
        case 0x4:
          DEBUG ((DEBUG_INFO, "Data Transfer Error\n"));
          break;
        case 0x5:
          DEBUG ((DEBUG_INFO, "Commands Aborted due to Power Loss Notification\n"));
          break;
        case 0x6:
          DEBUG ((DEBUG_INFO, "Internal Device Error\n"));
          break;
        case 0x7:
          DEBUG ((DEBUG_INFO, "Command Abort Requested\n"));
          break;
        case 0x8:
          DEBUG ((DEBUG_INFO, "Command Aborted due to SQ Deletion\n"));
          break;
        case 0x9:
          DEBUG ((DEBUG_INFO, "Command Aborted due to Failed Fused Command\n"));
          break;
        case 0xA:
          DEBUG ((DEBUG_INFO, "Command Aborted due to Missing Fused Command\n"));
          break;
        case 0xB:
          DEBUG ((DEBUG_INFO, "Invalid Namespace or Format\n"));
          break;
        case 0xC:
          DEBUG ((DEBUG_INFO, "Command Sequence Error\n"));
          break;
        case 0xD:
          DEBUG ((DEBUG_INFO, "Invalid SGL Last Segment Descriptor\n"));
          break;
        case 0xE:
          DEBUG ((DEBUG_INFO, "Invalid Number of SGL Descriptors\n"));
          break;
        case 0xF:
          DEBUG ((DEBUG_INFO, "Data SGL Length Invalid\n"));
          break;
        case 0x10:
          DEBUG ((DEBUG_INFO, "Metadata SGL Length Invalid\n"));
          break;
        case 0x11:
          DEBUG ((DEBUG_INFO, "SGL Descriptor Type Invalid\n"));
          break;
        case 0x80:
          DEBUG ((DEBUG_INFO, "LBA Out of Range\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Capacity Exceeded\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "Namespace Not Ready\n"));
          break;
        case 0x83:
          DEBUG ((DEBUG_INFO, "Reservation Conflict\n"));
          break;
      }
      break;

    //
    // SCT 1: command-specific status codes.
    //
    case 0x1:
      switch (Cq->Sc) {
        case 0x0:
          DEBUG ((DEBUG_INFO, "Completion Queue Invalid\n"));
          break;
        case 0x1:
          DEBUG ((DEBUG_INFO, "Invalid Queue Identifier\n"));
          break;
        case 0x2:
          DEBUG ((DEBUG_INFO, "Maximum Queue Size Exceeded\n"));
          break;
        case 0x3:
          DEBUG ((DEBUG_INFO, "Abort Command Limit Exceeded\n"));
          break;
        case 0x5:
          DEBUG ((DEBUG_INFO, "Asynchronous Event Request Limit Exceeded\n"));
          break;
        case 0x6:
          DEBUG ((DEBUG_INFO, "Invalid Firmware Slot\n"));
          break;
        case 0x7:
          DEBUG ((DEBUG_INFO, "Invalid Firmware Image\n"));
          break;
        case 0x8:
          DEBUG ((DEBUG_INFO, "Invalid Interrupt Vector\n"));
          break;
        case 0x9:
          DEBUG ((DEBUG_INFO, "Invalid Log Page\n"));
          break;
        case 0xA:
          DEBUG ((DEBUG_INFO, "Invalid Format\n"));
          break;
        case 0xB:
          DEBUG ((DEBUG_INFO, "Firmware Application Requires Conventional Reset\n"));
          break;
        case 0xC:
          DEBUG ((DEBUG_INFO, "Invalid Queue Deletion\n"));
          break;
        case 0xD:
          DEBUG ((DEBUG_INFO, "Feature Identifier Not Saveable\n"));
          break;
        case 0xE:
          DEBUG ((DEBUG_INFO, "Feature Not Changeable\n"));
          break;
        case 0xF:
          DEBUG ((DEBUG_INFO, "Feature Not Namespace Specific\n"));
          break;
        case 0x10:
          DEBUG ((DEBUG_INFO, "Firmware Application Requires NVM Subsystem Reset\n"));
          break;
        case 0x80:
          DEBUG ((DEBUG_INFO, "Conflicting Attributes\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Invalid Protection Information\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "Attempted Write to Read Only Range\n"));
          break;
      }
      break;

    //
    // SCT 2: media error status codes.
    //
    case 0x2:
      switch (Cq->Sc) {
        case 0x80:
          DEBUG ((DEBUG_INFO, "Write Fault\n"));
          break;
        case 0x81:
          DEBUG ((DEBUG_INFO, "Unrecovered Read Error\n"));
          break;
        case 0x82:
          DEBUG ((DEBUG_INFO, "End-to-end Guard Check Error\n"));
          break;
        case 0x83:
          DEBUG ((DEBUG_INFO, "End-to-end Application Tag Check Error\n"));
          break;
        case 0x84:
          DEBUG ((DEBUG_INFO, "End-to-end Reference Tag Check Error\n"));
          break;
        case 0x85:
          DEBUG ((DEBUG_INFO, "Compare Failure\n"));
          break;
        case 0x86:
          DEBUG ((DEBUG_INFO, "Access Denied\n"));
          break;
      }
      break;

    default:
      DEBUG ((DEBUG_INFO, "Unknown error\n"));
      break;
  }

  return EFI_DEVICE_ERROR;
}
302
/**
  Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function only
  supports blocking execution of the command.

  @param[in] Private        The pointer to the NVME_CONTEXT Data structure.
  @param[in] NamespaceId    Is a 32 bit Namespace ID to which the Express HCI command packet will
                            be sent.
                            A Value of 0 denotes the NVM Express controller, a Value of all 0FFh in
                            the namespace ID specifies that the command packet should be sent to all
                            valid namespaces.
  @param[in,out] Packet     A pointer to the EDKII PEI NVM Express PassThru Command Packet to send
                            to the NVMe namespace specified by NamespaceId.

  @retval EFI_SUCCESS    The EDKII PEI NVM Express Command Packet was sent by the host.
                         TransferLength bytes were transferred to, or from DataBuffer.
  @retval EFI_NOT_READY  The EDKII PEI NVM Express Command Packet could not be sent because
                         the controller is not ready. The caller may retry again later.
  @retval EFI_DEVICE_ERROR        A device error occurred while attempting to send the EDKII PEI NVM
                                  Express Command Packet.
  @retval EFI_INVALID_PARAMETER   Namespace, or the contents of EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET
                                  are invalid.
                                  The EDKII PEI NVM Express Command Packet was not sent, so no
                                  additional status information is available.
  @retval EFI_UNSUPPORTED   The command described by the EDKII PEI NVM Express Command Packet
                            is not supported by the host adapter.
                            The EDKII PEI NVM Express Command Packet was not sent, so no
                            additional status information is available.
  @retval EFI_TIMEOUT       A timeout occurred while waiting for the EDKII PEI NVM Express Command
                            Packet to execute.

**/
EFI_STATUS
NvmePassThru (
  IN     PEI_NVME_CONTROLLER_PRIVATE_DATA                  *Private,
  IN     UINT32                                            NamespaceId,
  IN OUT EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET    *Packet
  )
{
  EFI_STATUS               Status;
  NVME_SQ                  *Sq;
  NVME_CQ                  *Cq;
  UINT8                    QueueId;
  UINTN                    SqSize;
  UINTN                    CqSize;
  EDKII_IOMMU_OPERATION    MapOp;
  UINTN                    MapLength;
  EFI_PHYSICAL_ADDRESS     PhyAddr;
  VOID                     *MapData;
  VOID                     *MapMeta;
  UINT32                   Bytes;
  UINT32                   Offset;
  UINT32                   Data32;
  UINT64                   Timer;

  //
  // Check the data fields in Packet parameter
  //
  if (Packet == NULL) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: Packet(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet
      ));
    return EFI_INVALID_PARAMETER;
  }

  if ((Packet->NvmeCmd == NULL) || (Packet->NvmeCompletion == NULL)) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->NvmeCmd,
      (UINTN)Packet->NvmeCompletion
      ));
    return EFI_INVALID_PARAMETER;
  }

  if (Packet->QueueType != NVME_ADMIN_QUEUE && Packet->QueueType != NVME_IO_QUEUE) {
    DEBUG ((
      DEBUG_ERROR,
      "%a, Invalid parameter: QueueId(%lx)\n",
      __FUNCTION__,
      (UINTN)Packet->QueueType
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Locate the next free submission queue entry (tail) and the expected
  // completion queue entry (head) for the selected queue pair.
  //
  QueueId = Packet->QueueType;
  Sq  = Private->SqBuffer[QueueId] + Private->SqTdbl[QueueId].Sqt;
  Cq  = Private->CqBuffer[QueueId] + Private->CqHdbl[QueueId].Cqh;
  if (QueueId == NVME_ADMIN_QUEUE) {
    SqSize = NVME_ASQ_SIZE + 1;
    CqSize = NVME_ACQ_SIZE + 1;
  } else {
    SqSize = NVME_CSQ_SIZE + 1;
    CqSize = NVME_CCQ_SIZE + 1;
  }

  //
  // The caller-supplied command must target the namespace this request was
  // issued for.
  //
  if (Packet->NvmeCmd->Nsid != NamespaceId) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: Nsid mismatch (%x, %x)\n",
      __FUNCTION__,
      Packet->NvmeCmd->Nsid,
      NamespaceId
      ));
    return EFI_INVALID_PARAMETER;
  }

  //
  // Build the submission queue entry from the caller's command.
  //
  ZeroMem (Sq, sizeof (NVME_SQ));
  Sq->Opc  = Packet->NvmeCmd->Cdw0.Opcode;
  Sq->Fuse = Packet->NvmeCmd->Cdw0.FusedOperation;
  Sq->Cid  = Packet->NvmeCmd->Cdw0.Cid;
  Sq->Nsid = Packet->NvmeCmd->Nsid;

  //
  // Currently we only support PRP for data transfer, SGL is NOT supported
  //
  // NOTE(review): Sq->Psdt was cleared by the ZeroMem above and is never
  // assigned from Packet, so this check cannot fail as written.
  //
  ASSERT (Sq->Psdt == 0);
  if (Sq->Psdt != 0) {
    DEBUG ((DEBUG_ERROR, "%a: Does not support SGL mechanism.\n", __FUNCTION__));
    return EFI_UNSUPPORTED;
  }

  //
  // Prp[0] initially holds the host buffer address; it is replaced below by
  // the IOMMU-mapped device address when a data mapping is performed.
  //
  Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;
  Sq->Prp[1] = 0;
  MapData    = NULL;
  MapMeta    = NULL;
  Status     = EFI_SUCCESS;
  //
  // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller
  // specific addresses.
  //
  if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    //
    // TransferLength and TransferBuffer must be consistent: both present or
    // both absent.
    //
    if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||
        ((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // Currently, we only support creating IO submission/completion queues that are
    // allocated internally by the driver.
    //
    if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
        ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
      if ((Packet->TransferBuffer != Private->SqBuffer[NVME_IO_QUEUE]) &&
          (Packet->TransferBuffer != Private->CqBuffer[NVME_IO_QUEUE])) {
        DEBUG ((
          DEBUG_ERROR,
          "%a: Does not support external IO queues creation request.\n",
          __FUNCTION__
          ));
        return EFI_UNSUPPORTED;
      }
    } else {
      //
      // Opcode BIT0 indicates a host-to-controller transfer (bus master
      // read); otherwise the controller writes to host memory.
      //
      if ((Sq->Opc & BIT0) != 0) {
        MapOp = EdkiiIoMmuOperationBusMasterRead;
      } else {
        MapOp = EdkiiIoMmuOperationBusMasterWrite;
      }

      if ((Packet->TransferLength != 0) && (Packet->TransferBuffer != NULL)) {
        MapLength = Packet->TransferLength;
        Status = IoMmuMap (
                   MapOp,
                   Packet->TransferBuffer,
                   &MapLength,
                   &PhyAddr,
                   &MapData
                   );
        if (EFI_ERROR (Status) || (MapLength != Packet->TransferLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map data buffer.\n", __FUNCTION__));
          goto Exit;
        }

        Sq->Prp[0] = PhyAddr;
      }

      //
      // Map the optional metadata buffer the same way; its device address is
      // placed in the Metadata Pointer (Mptr) field.
      //
      if((Packet->MetadataLength != 0) && (Packet->MetadataBuffer != NULL)) {
        MapLength = Packet->MetadataLength;
        Status = IoMmuMap (
                   MapOp,
                   Packet->MetadataBuffer,
                   &MapLength,
                   &PhyAddr,
                   &MapMeta
                   );
        if (EFI_ERROR (Status) || (MapLength != Packet->MetadataLength)) {
          Status = EFI_OUT_OF_RESOURCES;
          DEBUG ((DEBUG_ERROR, "%a: Fail to map meta data buffer.\n", __FUNCTION__));
          goto Exit;
        }
        Sq->Mptr = PhyAddr;
      }
    }
  }

  //
  // If the Buffer Size spans more than two memory pages (page Size as defined in CC.Mps),
  // then build a PRP list in the second PRP submission queue entry.
  //
  Offset = ((UINT32)Sq->Prp[0]) & (EFI_PAGE_SIZE - 1);
  Bytes  = Packet->TransferLength;

  if ((Offset + Bytes) > (EFI_PAGE_SIZE * 2)) {
    //
    // Create PrpList for remaining Data Buffer.
    //
    PhyAddr = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
    Sq->Prp[1] = NvmeCreatePrpList (
                   Private,
                   PhyAddr,
                   EFI_SIZE_TO_PAGES(Offset + Bytes) - 1
                   );
    if (Sq->Prp[1] == 0) {
      Status = EFI_OUT_OF_RESOURCES;
      DEBUG ((DEBUG_ERROR, "%a: Create PRP list fail, Status - %r\n", __FUNCTION__, Status));
      goto Exit;
    }

  } else if ((Offset + Bytes) > EFI_PAGE_SIZE) {
    //
    // Transfer fits in two pages: Prp[1] is simply the second page address.
    //
    Sq->Prp[1] = (Sq->Prp[0] + EFI_PAGE_SIZE) & ~(EFI_PAGE_SIZE - 1);
  }

  //
  // Copy only the command dwords the caller marked valid; the rest remain
  // zero from the earlier ZeroMem.
  //
  if (Packet->NvmeCmd->Flags & CDW10_VALID) {
    Sq->Payload.Raw.Cdw10 = Packet->NvmeCmd->Cdw10;
  }
  if (Packet->NvmeCmd->Flags & CDW11_VALID) {
    Sq->Payload.Raw.Cdw11 = Packet->NvmeCmd->Cdw11;
  }
  if (Packet->NvmeCmd->Flags & CDW12_VALID) {
    Sq->Payload.Raw.Cdw12 = Packet->NvmeCmd->Cdw12;
  }
  if (Packet->NvmeCmd->Flags & CDW13_VALID) {
    Sq->Payload.Raw.Cdw13 = Packet->NvmeCmd->Cdw13;
  }
  if (Packet->NvmeCmd->Flags & CDW14_VALID) {
    Sq->Payload.Raw.Cdw14 = Packet->NvmeCmd->Cdw14;
  }
  if (Packet->NvmeCmd->Flags & CDW15_VALID) {
    Sq->Payload.Raw.Cdw15 = Packet->NvmeCmd->Cdw15;
  }

  //
  // Ring the submission queue doorbell.
  //
  Private->SqTdbl[QueueId].Sqt++;
  if (Private->SqTdbl[QueueId].Sqt == SqSize) {
    // Wrap the tail pointer at the end of the circular queue.
    Private->SqTdbl[QueueId].Sqt = 0;
  }
  Data32 = ReadUnaligned32 ((UINT32 *)&Private->SqTdbl[QueueId]);
  Status = NVME_SET_SQTDBL (Private, QueueId, &Data32);
  if (EFI_ERROR (Status)) {
    DEBUG ((DEBUG_ERROR, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __FUNCTION__, Status));
    goto Exit;
  }

  //
  // Wait for completion queue to get filled in.
  // A phase tag differing from the driver's expected value means the
  // controller has posted a new completion entry.
  // Timer advances in NVME_POLL_INTERVAL steps; assumes Packet->CommandTimeout
  // is expressed in the same time unit -- confirm against the PPI definition.
  //
  Status = EFI_TIMEOUT;
  Timer  = 0;
  while (Timer < Packet->CommandTimeout) {
    if (Cq->Pt != Private->Pt[QueueId]) {
      Status = EFI_SUCCESS;
      break;
    }

    MicroSecondDelay (NVME_POLL_INTERVAL);
    Timer += NVME_POLL_INTERVAL;
  }

  if (Status == EFI_TIMEOUT) {
    //
    // Timeout occurs for an NVMe command, reset the controller to abort the outstanding command
    //
    DEBUG ((DEBUG_ERROR, "%a: Timeout occurs for the PassThru command.\n", __FUNCTION__));
    Status = NvmeControllerInit (Private);
    if (EFI_ERROR (Status)) {
      Status = EFI_DEVICE_ERROR;
    } else {
      //
      // Return EFI_TIMEOUT to indicate a timeout occurs for PassThru command
      //
      Status = EFI_TIMEOUT;
    }
    goto Exit;
  }

  //
  // Move forward the Completion Queue head
  //
  Private->CqHdbl[QueueId].Cqh++;
  if (Private->CqHdbl[QueueId].Cqh == CqSize) {
    // Wrap the head pointer and flip the expected phase tag for the next
    // pass through the circular completion queue.
    Private->CqHdbl[QueueId].Cqh = 0;
    Private->Pt[QueueId] ^= 1;
  }

  //
  // Copy the Respose Queue entry for this command to the callers response buffer
  //
  CopyMem (Packet->NvmeCompletion, Cq, sizeof (EDKII_PEI_NVM_EXPRESS_COMPLETION));

  //
  // Check the NVMe cmd execution result
  //
  Status = NvmeCheckCqStatus (Cq);
  NVME_SET_CQHDBL (Private, QueueId, &Private->CqHdbl[QueueId]);

Exit:
  //
  // Release any IOMMU mappings created above; unmapping flushes the data
  // for bus-master-write operations.
  //
  if (MapMeta != NULL) {
    IoMmuUnmap (MapMeta);
  }

  if (MapData != NULL) {
    IoMmuUnmap (MapData);
  }

  return Status;
}