/** @file
  The NvmExpressPei driver is used to manage non-volatile memory subsystem
  which follows NVM Express specification at PEI phase.

  Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>

  This program and the accompanying materials
  are licensed and made available under the terms and conditions
  of the BSD License which accompanies this distribution. The
  full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
#include "NvmExpressPei.h"
21 Create PRP lists for Data transfer which is larger than 2 memory pages.
23 @param[in] Private The pointer to the PEI_NVME_CONTROLLER_PRIVATE_DATA data structure.
24 @param[in] PhysicalAddr The physical base address of Data Buffer.
25 @param[in] Pages The number of pages to be transfered.
27 @retval The pointer Value to the first PRP List of the PRP lists.
32 IN PEI_NVME_CONTROLLER_PRIVATE_DATA
*Private
,
33 IN EFI_PHYSICAL_ADDRESS PhysicalAddr
,
44 EFI_PHYSICAL_ADDRESS PrpListPhyAddr
;
47 EFI_PHYSICAL_ADDRESS NewPhyAddr
;
50 // The number of Prp Entry in a memory page.
52 PrpEntryNo
= EFI_PAGE_SIZE
/ sizeof (UINT64
);
55 // Calculate total PrpList number.
57 PrpListNo
= (UINTN
) DivU64x64Remainder ((UINT64
)Pages
, (UINT64
)PrpEntryNo
, &Remainder
);
62 if (PrpListNo
> NVME_PRP_SIZE
) {
65 "%a: The implementation only supports PrpList number up to 4."
66 " But %d are needed here.\n",
72 PrpListHost
= (VOID
*)(UINTN
) NVME_PRP_BASE (Private
);
74 Bytes
= EFI_PAGES_TO_SIZE (PrpListNo
);
75 PrpListPhyAddr
= (UINT64
)(UINTN
)(PrpListHost
);
78 // Fill all PRP lists except of last one.
80 ZeroMem (PrpListHost
, Bytes
);
81 for (PrpListIndex
= 0; PrpListIndex
< PrpListNo
- 1; ++PrpListIndex
) {
82 PrpListBase
= (UINTN
)PrpListHost
+ PrpListIndex
* EFI_PAGE_SIZE
;
84 for (PrpEntryIndex
= 0; PrpEntryIndex
< PrpEntryNo
; ++PrpEntryIndex
) {
85 PrpEntry
= (UINT8
*)(UINTN
) (PrpListBase
+ PrpEntryIndex
* sizeof(UINT64
));
86 if (PrpEntryIndex
!= PrpEntryNo
- 1) {
88 // Fill all PRP entries except of last one.
90 CopyMem (PrpEntry
, (VOID
*)(UINTN
) (&PhysicalAddr
), sizeof (UINT64
));
91 PhysicalAddr
+= EFI_PAGE_SIZE
;
94 // Fill last PRP entries with next PRP List pointer.
96 NewPhyAddr
= (PrpListPhyAddr
+ (PrpListIndex
+ 1) * EFI_PAGE_SIZE
);
97 CopyMem (PrpEntry
, (VOID
*)(UINTN
) (&NewPhyAddr
), sizeof (UINT64
));
103 // Fill last PRP list.
105 PrpListBase
= (UINTN
)PrpListHost
+ PrpListIndex
* EFI_PAGE_SIZE
;
106 for (PrpEntryIndex
= 0; PrpEntryIndex
< ((Remainder
!= 0) ? Remainder
: PrpEntryNo
); ++PrpEntryIndex
) {
107 PrpEntry
= (UINT8
*)(UINTN
) (PrpListBase
+ PrpEntryIndex
* sizeof(UINT64
));
108 CopyMem (PrpEntry
, (VOID
*)(UINTN
) (&PhysicalAddr
), sizeof (UINT64
));
110 PhysicalAddr
+= EFI_PAGE_SIZE
;
113 return PrpListPhyAddr
;
117 Check the execution status from a given completion queue entry.
119 @param[in] Cq A pointer to the NVME_CQ item.
127 if (Cq
->Sct
== 0x0 && Cq
->Sc
== 0x0) {
131 DEBUG ((DEBUG_INFO
, "Dump NVMe Completion Entry Status from [0x%x]:\n", (UINTN
)Cq
));
134 " SQ Identifier : [0x%x], Phase Tag : [%d], Cmd Identifier : [0x%x]\n",
139 DEBUG ((DEBUG_INFO
, " Status Code Type : [0x%x], Status Code : [0x%x]\n", Cq
->Sct
, Cq
->Sc
));
140 DEBUG ((DEBUG_INFO
, " NVMe Cmd Execution Result - "));
146 DEBUG ((DEBUG_INFO
, "Successful Completion\n"));
149 DEBUG ((DEBUG_INFO
, "Invalid Command Opcode\n"));
152 DEBUG ((DEBUG_INFO
, "Invalid Field in Command\n"));
155 DEBUG ((DEBUG_INFO
, "Command ID Conflict\n"));
158 DEBUG ((DEBUG_INFO
, "Data Transfer Error\n"));
161 DEBUG ((DEBUG_INFO
, "Commands Aborted due to Power Loss Notification\n"));
164 DEBUG ((DEBUG_INFO
, "Internal Device Error\n"));
167 DEBUG ((DEBUG_INFO
, "Command Abort Requested\n"));
170 DEBUG ((DEBUG_INFO
, "Command Aborted due to SQ Deletion\n"));
173 DEBUG ((DEBUG_INFO
, "Command Aborted due to Failed Fused Command\n"));
176 DEBUG ((DEBUG_INFO
, "Command Aborted due to Missing Fused Command\n"));
179 DEBUG ((DEBUG_INFO
, "Invalid Namespace or Format\n"));
182 DEBUG ((DEBUG_INFO
, "Command Sequence Error\n"));
185 DEBUG ((DEBUG_INFO
, "Invalid SGL Last Segment Descriptor\n"));
188 DEBUG ((DEBUG_INFO
, "Invalid Number of SGL Descriptors\n"));
191 DEBUG ((DEBUG_INFO
, "Data SGL Length Invalid\n"));
194 DEBUG ((DEBUG_INFO
, "Metadata SGL Length Invalid\n"));
197 DEBUG ((DEBUG_INFO
, "SGL Descriptor Type Invalid\n"));
200 DEBUG ((DEBUG_INFO
, "LBA Out of Range\n"));
203 DEBUG ((DEBUG_INFO
, "Capacity Exceeded\n"));
206 DEBUG ((DEBUG_INFO
, "Namespace Not Ready\n"));
209 DEBUG ((DEBUG_INFO
, "Reservation Conflict\n"));
217 DEBUG ((DEBUG_INFO
, "Completion Queue Invalid\n"));
220 DEBUG ((DEBUG_INFO
, "Invalid Queue Identifier\n"));
223 DEBUG ((DEBUG_INFO
, "Maximum Queue Size Exceeded\n"));
226 DEBUG ((DEBUG_INFO
, "Abort Command Limit Exceeded\n"));
229 DEBUG ((DEBUG_INFO
, "Asynchronous Event Request Limit Exceeded\n"));
232 DEBUG ((DEBUG_INFO
, "Invalid Firmware Slot\n"));
235 DEBUG ((DEBUG_INFO
, "Invalid Firmware Image\n"));
238 DEBUG ((DEBUG_INFO
, "Invalid Interrupt Vector\n"));
241 DEBUG ((DEBUG_INFO
, "Invalid Log Page\n"));
244 DEBUG ((DEBUG_INFO
, "Invalid Format\n"));
247 DEBUG ((DEBUG_INFO
, "Firmware Application Requires Conventional Reset\n"));
250 DEBUG ((DEBUG_INFO
, "Invalid Queue Deletion\n"));
253 DEBUG ((DEBUG_INFO
, "Feature Identifier Not Saveable\n"));
256 DEBUG ((DEBUG_INFO
, "Feature Not Changeable\n"));
259 DEBUG ((DEBUG_INFO
, "Feature Not Namespace Specific\n"));
262 DEBUG ((DEBUG_INFO
, "Firmware Application Requires NVM Subsystem Reset\n"));
265 DEBUG ((DEBUG_INFO
, "Conflicting Attributes\n"));
268 DEBUG ((DEBUG_INFO
, "Invalid Protection Information\n"));
271 DEBUG ((DEBUG_INFO
, "Attempted Write to Read Only Range\n"));
279 DEBUG ((DEBUG_INFO
, "Write Fault\n"));
282 DEBUG ((DEBUG_INFO
, "Unrecovered Read Error\n"));
285 DEBUG ((DEBUG_INFO
, "End-to-end Guard Check Error\n"));
288 DEBUG ((DEBUG_INFO
, "End-to-end Application Tag Check Error\n"));
291 DEBUG ((DEBUG_INFO
, "End-to-end Reference Tag Check Error\n"));
294 DEBUG ((DEBUG_INFO
, "Compare Failure\n"));
297 DEBUG ((DEBUG_INFO
, "Access Denied\n"));
303 DEBUG ((DEBUG_INFO
, "Unknown error\n"));
307 return EFI_DEVICE_ERROR
;
311 Sends an NVM Express Command Packet to an NVM Express controller or namespace. This function only
312 supports blocking execution of the command.
314 @param[in] Private The pointer to the NVME_CONTEXT Data structure.
315 @param[in] NamespaceId Is a 32 bit Namespace ID to which the Express HCI command packet will
317 A Value of 0 denotes the NVM Express controller, a Value of all 0FFh in
318 the namespace ID specifies that the command packet should be sent to all
320 @param[in,out] Packet A pointer to the EDKII PEI NVM Express PassThru Command Packet to send
321 to the NVMe namespace specified by NamespaceId.
323 @retval EFI_SUCCESS The EDKII PEI NVM Express Command Packet was sent by the host.
324 TransferLength bytes were transferred to, or from DataBuffer.
325 @retval EFI_NOT_READY The EDKII PEI NVM Express Command Packet could not be sent because
326 the controller is not ready. The caller may retry again later.
327 @retval EFI_DEVICE_ERROR A device error occurred while attempting to send the EDKII PEI NVM
328 Express Command Packet.
329 @retval EFI_INVALID_PARAMETER Namespace, or the contents of EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET
331 The EDKII PEI NVM Express Command Packet was not sent, so no
332 additional status information is available.
333 @retval EFI_UNSUPPORTED The command described by the EDKII PEI NVM Express Command Packet
334 is not supported by the host adapter.
335 The EDKII PEI NVM Express Command Packet was not sent, so no
336 additional status information is available.
337 @retval EFI_TIMEOUT A timeout occurred while waiting for the EDKII PEI NVM Express Command
343 IN PEI_NVME_CONTROLLER_PRIVATE_DATA
*Private
,
344 IN UINT32 NamespaceId
,
345 IN OUT EDKII_PEI_NVM_EXPRESS_PASS_THRU_COMMAND_PACKET
*Packet
354 EDKII_IOMMU_OPERATION MapOp
;
356 EFI_PHYSICAL_ADDRESS PhyAddr
;
365 // Check the data fields in Packet parameter
367 if (Packet
== NULL
) {
370 "%a, Invalid parameter: Packet(%lx)\n",
374 return EFI_INVALID_PARAMETER
;
377 if ((Packet
->NvmeCmd
== NULL
) || (Packet
->NvmeCompletion
== NULL
)) {
380 "%a, Invalid parameter: NvmeCmd (%lx)/NvmeCompletion(%lx)\n",
382 (UINTN
)Packet
->NvmeCmd
,
383 (UINTN
)Packet
->NvmeCompletion
385 return EFI_INVALID_PARAMETER
;
388 if (Packet
->QueueType
!= NVME_ADMIN_QUEUE
&& Packet
->QueueType
!= NVME_IO_QUEUE
) {
391 "%a, Invalid parameter: QueueId(%lx)\n",
393 (UINTN
)Packet
->QueueType
395 return EFI_INVALID_PARAMETER
;
398 QueueId
= Packet
->QueueType
;
399 Sq
= Private
->SqBuffer
[QueueId
] + Private
->SqTdbl
[QueueId
].Sqt
;
400 Cq
= Private
->CqBuffer
[QueueId
] + Private
->CqHdbl
[QueueId
].Cqh
;
401 if (QueueId
== NVME_ADMIN_QUEUE
) {
402 SqSize
= NVME_ASQ_SIZE
+ 1;
403 CqSize
= NVME_ACQ_SIZE
+ 1;
405 SqSize
= NVME_CSQ_SIZE
+ 1;
406 CqSize
= NVME_CCQ_SIZE
+ 1;
409 if (Packet
->NvmeCmd
->Nsid
!= NamespaceId
) {
412 "%a: Nsid mismatch (%x, %x)\n",
414 Packet
->NvmeCmd
->Nsid
,
417 return EFI_INVALID_PARAMETER
;
420 ZeroMem (Sq
, sizeof (NVME_SQ
));
421 Sq
->Opc
= Packet
->NvmeCmd
->Cdw0
.Opcode
;
422 Sq
->Fuse
= Packet
->NvmeCmd
->Cdw0
.FusedOperation
;
423 Sq
->Cid
= Packet
->NvmeCmd
->Cdw0
.Cid
;
424 Sq
->Nsid
= Packet
->NvmeCmd
->Nsid
;
427 // Currently we only support PRP for data transfer, SGL is NOT supported
429 ASSERT (Sq
->Psdt
== 0);
431 DEBUG ((DEBUG_ERROR
, "%a: Does not support SGL mechanism.\n", __FUNCTION__
));
432 return EFI_UNSUPPORTED
;
435 Sq
->Prp
[0] = (UINT64
)(UINTN
)Packet
->TransferBuffer
;
439 Status
= EFI_SUCCESS
;
441 // If the NVMe cmd has data in or out, then mapping the user buffer to the PCI controller
442 // specific addresses.
444 if ((Sq
->Opc
& (BIT0
| BIT1
)) != 0) {
445 if (((Packet
->TransferLength
!= 0) && (Packet
->TransferBuffer
== NULL
)) ||
446 ((Packet
->TransferLength
== 0) && (Packet
->TransferBuffer
!= NULL
))) {
447 return EFI_INVALID_PARAMETER
;
451 // Currently, we only support creating IO submission/completion queues that are
452 // allocated internally by the driver.
454 if ((Packet
->QueueType
== NVME_ADMIN_QUEUE
) &&
455 ((Sq
->Opc
== NVME_ADMIN_CRIOCQ_CMD
) || (Sq
->Opc
== NVME_ADMIN_CRIOSQ_CMD
))) {
456 if ((Packet
->TransferBuffer
!= Private
->SqBuffer
[NVME_IO_QUEUE
]) &&
457 (Packet
->TransferBuffer
!= Private
->CqBuffer
[NVME_IO_QUEUE
])) {
460 "%a: Does not support external IO queues creation request.\n",
463 return EFI_UNSUPPORTED
;
466 if ((Sq
->Opc
& BIT0
) != 0) {
467 MapOp
= EdkiiIoMmuOperationBusMasterRead
;
469 MapOp
= EdkiiIoMmuOperationBusMasterWrite
;
472 if ((Packet
->TransferLength
!= 0) && (Packet
->TransferBuffer
!= NULL
)) {
473 MapLength
= Packet
->TransferLength
;
476 Packet
->TransferBuffer
,
481 if (EFI_ERROR (Status
) || (MapLength
!= Packet
->TransferLength
)) {
482 Status
= EFI_OUT_OF_RESOURCES
;
483 DEBUG ((DEBUG_ERROR
, "%a: Fail to map data buffer.\n", __FUNCTION__
));
487 Sq
->Prp
[0] = PhyAddr
;
490 if((Packet
->MetadataLength
!= 0) && (Packet
->MetadataBuffer
!= NULL
)) {
491 MapLength
= Packet
->MetadataLength
;
494 Packet
->MetadataBuffer
,
499 if (EFI_ERROR (Status
) || (MapLength
!= Packet
->MetadataLength
)) {
500 Status
= EFI_OUT_OF_RESOURCES
;
501 DEBUG ((DEBUG_ERROR
, "%a: Fail to map meta data buffer.\n", __FUNCTION__
));
510 // If the Buffer Size spans more than two memory pages (page Size as defined in CC.Mps),
511 // then build a PRP list in the second PRP submission queue entry.
513 Offset
= ((UINT32
)Sq
->Prp
[0]) & (EFI_PAGE_SIZE
- 1);
514 Bytes
= Packet
->TransferLength
;
516 if ((Offset
+ Bytes
) > (EFI_PAGE_SIZE
* 2)) {
518 // Create PrpList for remaining Data Buffer.
520 PhyAddr
= (Sq
->Prp
[0] + EFI_PAGE_SIZE
) & ~(EFI_PAGE_SIZE
- 1);
521 Sq
->Prp
[1] = NvmeCreatePrpList (
524 EFI_SIZE_TO_PAGES(Offset
+ Bytes
) - 1
526 if (Sq
->Prp
[1] == 0) {
527 Status
= EFI_OUT_OF_RESOURCES
;
528 DEBUG ((DEBUG_ERROR
, "%a: Create PRP list fail, Status - %r\n", __FUNCTION__
, Status
));
532 } else if ((Offset
+ Bytes
) > EFI_PAGE_SIZE
) {
533 Sq
->Prp
[1] = (Sq
->Prp
[0] + EFI_PAGE_SIZE
) & ~(EFI_PAGE_SIZE
- 1);
536 if (Packet
->NvmeCmd
->Flags
& CDW10_VALID
) {
537 Sq
->Payload
.Raw
.Cdw10
= Packet
->NvmeCmd
->Cdw10
;
539 if (Packet
->NvmeCmd
->Flags
& CDW11_VALID
) {
540 Sq
->Payload
.Raw
.Cdw11
= Packet
->NvmeCmd
->Cdw11
;
542 if (Packet
->NvmeCmd
->Flags
& CDW12_VALID
) {
543 Sq
->Payload
.Raw
.Cdw12
= Packet
->NvmeCmd
->Cdw12
;
545 if (Packet
->NvmeCmd
->Flags
& CDW13_VALID
) {
546 Sq
->Payload
.Raw
.Cdw13
= Packet
->NvmeCmd
->Cdw13
;
548 if (Packet
->NvmeCmd
->Flags
& CDW14_VALID
) {
549 Sq
->Payload
.Raw
.Cdw14
= Packet
->NvmeCmd
->Cdw14
;
551 if (Packet
->NvmeCmd
->Flags
& CDW15_VALID
) {
552 Sq
->Payload
.Raw
.Cdw15
= Packet
->NvmeCmd
->Cdw15
;
556 // Ring the submission queue doorbell.
558 Private
->SqTdbl
[QueueId
].Sqt
++;
559 if (Private
->SqTdbl
[QueueId
].Sqt
== SqSize
) {
560 Private
->SqTdbl
[QueueId
].Sqt
= 0;
562 Data32
= ReadUnaligned32 ((UINT32
*)&Private
->SqTdbl
[QueueId
]);
563 Status
= NVME_SET_SQTDBL (Private
, QueueId
, &Data32
);
564 if (EFI_ERROR (Status
)) {
565 DEBUG ((DEBUG_ERROR
, "%a: NVME_SET_SQTDBL fail, Status - %r\n", __FUNCTION__
, Status
));
570 // Wait for completion queue to get filled in.
572 Status
= EFI_TIMEOUT
;
574 while (Timer
< Packet
->CommandTimeout
) {
575 if (Cq
->Pt
!= Private
->Pt
[QueueId
]) {
576 Status
= EFI_SUCCESS
;
580 MicroSecondDelay (NVME_POLL_INTERVAL
);
581 Timer
+= NVME_POLL_INTERVAL
;
584 if (Status
== EFI_TIMEOUT
) {
586 // Timeout occurs for an NVMe command, reset the controller to abort the outstanding command
588 DEBUG ((DEBUG_ERROR
, "%a: Timeout occurs for the PassThru command.\n", __FUNCTION__
));
589 Status
= NvmeControllerInit (Private
);
590 if (EFI_ERROR (Status
)) {
591 Status
= EFI_DEVICE_ERROR
;
594 // Return EFI_TIMEOUT to indicate a timeout occurs for PassThru command
596 Status
= EFI_TIMEOUT
;
602 // Move forward the Completion Queue head
604 Private
->CqHdbl
[QueueId
].Cqh
++;
605 if (Private
->CqHdbl
[QueueId
].Cqh
== CqSize
) {
606 Private
->CqHdbl
[QueueId
].Cqh
= 0;
607 Private
->Pt
[QueueId
] ^= 1;
611 // Copy the Respose Queue entry for this command to the callers response buffer
613 CopyMem (Packet
->NvmeCompletion
, Cq
, sizeof (EDKII_PEI_NVM_EXPRESS_COMPLETION
));
616 // Check the NVMe cmd execution result
618 Status
= NvmeCheckCqStatus (Cq
);
619 NVME_SET_CQHDBL (Private
, QueueId
, &Private
->CqHdbl
[QueueId
]);
622 if (MapMeta
!= NULL
) {
623 IoMmuUnmap (MapMeta
);
626 if (MapData
!= NULL
) {
627 IoMmuUnmap (MapData
);