}\r
\r
Sq->Prp[0] = (UINT64)(UINTN)Packet->TransferBuffer;\r
  if ((Packet->QueueType == NVME_ADMIN_QUEUE) &&
      ((Sq->Opc == NVME_ADMIN_CRIOCQ_CMD) || (Sq->Opc == NVME_ADMIN_CRIOSQ_CMD))) {
    //
    // Currently, we only use the IO Completion/Submission queues created internally
    // by this driver during controller initialization. Any other IO queues created
    // will not be consumed here. There is little value in accepting external IO
    // queue creation requests, so return EFI_UNSUPPORTED for any external IO
    // queue creation request.
    //
    if (!Private->CreateIoQueue) {
      DEBUG ((DEBUG_ERROR, "NvmExpressPassThru: Does not support external IO queues creation request.\n"));
      return EFI_UNSUPPORTED;
    }
  } else if ((Sq->Opc & (BIT0 | BIT1)) != 0) {
    //
    // If the NVMe cmd has data in or out, then map the user buffer to the PCI controller specific addresses.
    //
if (((Packet->TransferLength != 0) && (Packet->TransferBuffer == NULL)) ||\r
((Packet->TransferLength == 0) && (Packet->TransferBuffer != NULL))) {\r
return EFI_INVALID_PARAMETER;\r