git.proxmox.com Git - mirror_qemu.git/commitdiff
Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging
author     Stefan Hajnoczi <stefanha@redhat.com>
           Fri, 8 Sep 2023 14:06:25 +0000 (10:06 -0400)
committer  Stefan Hajnoczi <stefanha@redhat.com>
           Fri, 8 Sep 2023 14:06:25 +0000 (10:06 -0400)
trivial patches for 2023-09-08

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEe3O61ovnosKJMUsicBtPaxppPlkFAmT68tMPHG1qdEB0bHMu
# bXNrLnJ1AAoJEHAbT2saaT5ZbEwH/2XcX1f4KcEJbgUn0JVhGQ5GH2c2jepZlkTZ
# 2dhvdEECbOPMg73hty0fyyWlyuLWdJ9cMpONfMtzmHTH8RKEOAbpn/zusyo3H+48
# 6cunyUpBqbmb7MHPchrN+JmvtvaSPSazsj2Zdkh+Y4WlfEYj+yVysQ4zQlBlRyHv
# iOTi6OdjxXg1QcbtJxAUhp+tKaRJzagiCpLkoyW2m8DIuV9cLVHMJsE3OMgfKNgK
# /S+O1fLcaDhuSCrHAbZzArF3Tr4bfLqSwDtGCJfQpqKeIQDJuI+41GLIlm1nYY70
# IFJzEWMOrX/rcMG1CQnUFZOOyDSO+NfILwNnU+eyM49MUekmY54=
# =mmPS
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 08 Sep 2023 06:09:23 EDT
# gpg:                using RSA key 7B73BAD68BE7A2C289314B22701B4F6B1A693E59
# gpg:                issuer "mjt@tls.msk.ru"
# gpg: Good signature from "Michael Tokarev <mjt@tls.msk.ru>" [full]
# gpg:                 aka "Michael Tokarev <mjt@corpit.ru>" [full]
# gpg:                 aka "Michael Tokarev <mjt@debian.org>" [full]
# Primary key fingerprint: 6EE1 95D1 886E 8FFB 810D  4324 457C E0A0 8044 65C5
#      Subkey fingerprint: 7B73 BAD6 8BE7 A2C2 8931  4B22 701B 4F6B 1A69 3E59

* tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu: (22 commits)
  qxl: don't assert() if device isn't yet initialized
  hw/net/vmxnet3: Fix guest-triggerable assert()
  tests/qtest/usb-hcd: Remove the empty "init" tests
  target/ppc: use g_free() in test_opcode_table()
  hw/ppc: use g_free() in spapr_tce_table_post_load()
  trivial: Simplify the spots that use TARGET_BIG_ENDIAN as a numeric value
  accel/tcg: Fix typo in translator_io_start() description
  tests/qtest/test-hmp: Fix migrate_set_parameter xbzrle-cache-size test
  docs tests: Fix use of migrate_set_parameter
  qemu-options.hx: Rephrase the descriptions of the -hd* and -cdrom options
  hw/display/xlnx_dp: update comments
  block: spelling fixes
  misc/other: spelling fixes
  qga/: spelling fixes
  tests/: spelling fixes
  scripts/: spelling fixes
  include/: spelling fixes
  audio: spelling fixes
  xen: spelling fix
  riscv: spelling fixes
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
43 files changed:
MAINTAINERS
block/nbd.c
docs/specs/pci-ids.rst
docs/tools/qemu-nbd.rst
hw/Kconfig
hw/meson.build
hw/ufs/Kconfig [new file with mode: 0644]
hw/ufs/lu.c [new file with mode: 0644]
hw/ufs/meson.build [new file with mode: 0644]
hw/ufs/trace-events [new file with mode: 0644]
hw/ufs/trace.h [new file with mode: 0644]
hw/ufs/ufs.c [new file with mode: 0644]
hw/ufs/ufs.h [new file with mode: 0644]
include/block/nbd.h
include/block/ufs.h [new file with mode: 0644]
include/hw/pci/pci.h
include/hw/pci/pci_ids.h
include/io/channel-util.h
include/io/channel.h
include/qemu/vhost-user-server.h
include/scsi/constants.h
io/channel-command.c
io/channel-file.c
io/channel-null.c
io/channel-socket.c
io/channel-tls.c
io/channel-util.c
io/channel.c
iothread.c
meson.build
migration/channel-block.c
migration/rdma.c
nbd/client-connection.c
nbd/client.c
nbd/server.c
qemu-nbd.c
scsi/qemu-pr-helper.c
tests/qemu-iotests/197
tests/qemu-iotests/197.out
tests/qtest/meson.build
tests/qtest/ufs-test.c [new file with mode: 0644]
util/iov.c
util/vhost-user-server.c

diff --git a/MAINTAINERS b/MAINTAINERS
index b471973e1e11c304f36acf55032adb612165446f..bf2366815b8e3aab89f11593e94fabc08759426c 100644 (file)
@@ -2248,6 +2248,13 @@ F: tests/qtest/nvme-test.c
 F: docs/system/devices/nvme.rst
 T: git git://git.infradead.org/qemu-nvme.git nvme-next
 
+ufs
+M: Jeuk Kim <jeuk20.kim@samsung.com>
+S: Supported
+F: hw/ufs/*
+F: include/block/ufs.h
+F: tests/qtest/ufs-test.c
+
 megasas
 M: Hannes Reinecke <hare@suse.com>
 L: qemu-block@nongnu.org
diff --git a/block/nbd.c b/block/nbd.c
index 5322e66166c6dce5696e5d328e37e7ce1bbfd439..cc48580df703f7d34916ac4548c0fc5ff21751f8 100644 (file)
@@ -352,7 +352,7 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
     }
 
     qio_channel_set_blocking(s->ioc, false, NULL);
-    qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));
+    qio_channel_set_follow_coroutine_ctx(s->ioc, true);
 
     /* successfully connected */
     WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
@@ -397,7 +397,6 @@ static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
 
     /* Finalize previous connection if any */
     if (s->ioc) {
-        qio_channel_detach_aio_context(s->ioc);
         yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                  nbd_yank, s->bs);
         object_unref(OBJECT(s->ioc));
@@ -2089,10 +2088,6 @@ static void nbd_attach_aio_context(BlockDriverState *bs,
      * the reconnect_delay_timer cannot be active here.
      */
     assert(!s->reconnect_delay_timer);
-
-    if (s->ioc) {
-        qio_channel_attach_aio_context(s->ioc, new_context);
-    }
 }
 
 static void nbd_detach_aio_context(BlockDriverState *bs)
@@ -2101,10 +2096,6 @@ static void nbd_detach_aio_context(BlockDriverState *bs)
 
     assert(!s->open_timer);
     assert(!s->reconnect_delay_timer);
-
-    if (s->ioc) {
-        qio_channel_detach_aio_context(s->ioc);
-    }
 }
 
 static BlockDriver bdrv_nbd = {
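The block/nbd.c hunks above drop the manual AioContext attach/detach calls in favour of a single opt-in at connect time. As a hedged sketch of the before/after pattern (names shortened, error handling omitted; only the two QIOChannel calls that appear in the hunks are assumed):

/* Sketch of the lifecycle change in this commit, not the full driver. */

/* Before: the driver mirrored every AioContext move by hand. */
static void nbd_attach_aio_context_old(BlockDriverState *bs,
                                       AioContext *new_context)
{
    BDRVNBDState *s = bs->opaque;

    if (s->ioc) {
        qio_channel_attach_aio_context(s->ioc, new_context);
    }
}

/* After: the channel is told once, at connect time, to follow the
 * AioContext of whichever coroutine performs I/O on it; the driver's
 * attach/detach callbacks keep only their assertions. */
static void nbd_connect_channel_new(BDRVNBDState *s)
{
    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(s->ioc, true);
}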
diff --git a/docs/specs/pci-ids.rst b/docs/specs/pci-ids.rst
index e302bea484e6a074874df4294ff8307c4bc037b4..d6707fa069a8abe1e82c2aac231ffb5b8ab64387 100644 (file)
@@ -92,6 +92,8 @@ PCI devices (other than virtio):
   PCI PVPanic device (``-device pvpanic-pci``)
 1b36:0012
   PCI ACPI ERST device (``-device acpi-erst``)
+1b36:0013
+  PCI UFS device (``-device ufs``)
 
 All these devices are documented in :doc:`index`.
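For reference, a minimal sketch of how this 1b36:0013 ID would typically be set on the device's config space in QEMU. The symbolic name for the UFS device ID is an assumption here, since include/hw/pci/pci_ids.h is listed among the changed files but its hunk is not shown on this page:

/* Sketch only: wiring a Red Hat test-range PCI ID in a QEMU device.
 * The value 0x0013 comes from the table above; a constant such as
 * PCI_DEVICE_ID_REDHAT_UFS is assumed, not shown in this diff. */
static void ufs_set_pci_ids_sketch(PCIDevice *pci_dev)
{
    pci_config_set_vendor_id(pci_dev->config, PCI_VENDOR_ID_REDHAT); /* 0x1b36 */
    pci_config_set_device_id(pci_dev->config, 0x0013 /* PCI UFS */);
}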
 
diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst
index faf6349ea51e03997803e0ad9340e290ca3dd46e..329f44d98957133d7fd6fadb3c93c900423a81f2 100644 (file)
@@ -197,7 +197,9 @@ driver options if :option:`--image-opts` is specified.
 
 .. option:: -v, --verbose
 
-  Display extra debugging information.
+  Display extra debugging information. This option also keeps the original
+  *STDERR* stream open if the ``qemu-nbd`` process is daemonized due to
+  other options like :option:`--fork` or :option:`-c`.
 
 .. option:: -h, --help
 
diff --git a/hw/Kconfig b/hw/Kconfig
index ba62ff6417c9c0edc5b41783dc028447f84b8413..9ca7b38c31f1a42a0a6f4262cb5a8e79464cac0b 100644 (file)
@@ -38,6 +38,7 @@ source smbios/Kconfig
 source ssi/Kconfig
 source timer/Kconfig
 source tpm/Kconfig
+source ufs/Kconfig
 source usb/Kconfig
 source virtio/Kconfig
 source vfio/Kconfig
diff --git a/hw/meson.build b/hw/meson.build
index c7ac7d3d75ba2c0be267b1528c2492c0bf09d000..f01fac4617c99a3cc567f84f84025f0597bb1ecb 100644 (file)
@@ -37,6 +37,7 @@ subdir('smbios')
 subdir('ssi')
 subdir('timer')
 subdir('tpm')
+subdir('ufs')
 subdir('usb')
 subdir('vfio')
 subdir('virtio')
diff --git a/hw/ufs/Kconfig b/hw/ufs/Kconfig
new file mode 100644 (file)
index 0000000..b7b3392
--- /dev/null
@@ -0,0 +1,4 @@
+config UFS_PCI
+    bool
+    default y if PCI_DEVICES
+    depends on PCI
diff --git a/hw/ufs/lu.c b/hw/ufs/lu.c
new file mode 100644 (file)
index 0000000..e1c46bd
--- /dev/null
@@ -0,0 +1,1445 @@
+/*
+ * QEMU UFS Logical Unit
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Written by Jeuk Kim <jeuk20.kim@samsung.com>
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "qemu/memalign.h"
+#include "hw/scsi/scsi.h"
+#include "scsi/constants.h"
+#include "sysemu/block-backend.h"
+#include "qemu/cutils.h"
+#include "trace.h"
+#include "ufs.h"
+
+/*
+ * The code below handling SCSI commands is copied from hw/scsi/scsi-disk.c,
+ * with minor adjustments to make it work for UFS.
+ */
+
+#define SCSI_DMA_BUF_SIZE (128 * KiB)
+#define SCSI_MAX_INQUIRY_LEN 256
+#define SCSI_INQUIRY_DATA_SIZE 36
+#define SCSI_MAX_MODE_LEN 256
+
+typedef struct UfsSCSIReq {
+    SCSIRequest req;
+    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes.  */
+    uint64_t sector;
+    uint32_t sector_count;
+    uint32_t buflen;
+    bool started;
+    bool need_fua_emulation;
+    struct iovec iov;
+    QEMUIOVector qiov;
+    BlockAcctCookie acct;
+} UfsSCSIReq;
+
+static void ufs_scsi_free_request(SCSIRequest *req)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+
+    qemu_vfree(r->iov.iov_base);
+}
+
+static void scsi_check_condition(UfsSCSIReq *r, SCSISense sense)
+{
+    trace_ufs_scsi_check_condition(r->req.tag, sense.key, sense.asc,
+                                   sense.ascq);
+    scsi_req_build_sense(&r->req, sense);
+    scsi_req_complete(&r->req, CHECK_CONDITION);
+}
+
+static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf,
+                                     uint32_t outbuf_len)
+{
+    UfsHc *u = UFS(req->bus->qbus.parent);
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
+    uint8_t page_code = req->cmd.buf[2];
+    int start, buflen = 0;
+
+    if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
+        return -1;
+    }
+
+    outbuf[buflen++] = lu->qdev.type & 0x1f;
+    outbuf[buflen++] = page_code;
+    outbuf[buflen++] = 0x00;
+    outbuf[buflen++] = 0x00;
+    start = buflen;
+
+    switch (page_code) {
+    case 0x00: /* Supported page codes, mandatory */
+    {
+        trace_ufs_scsi_emulate_vpd_page_00(req->cmd.xfer);
+        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
+        if (u->params.serial) {
+            outbuf[buflen++] = 0x80; /* unit serial number */
+        }
+        outbuf[buflen++] = 0x87; /* mode page policy */
+        break;
+    }
+    case 0x80: /* Device serial number, optional */
+    {
+        int l;
+
+        if (!u->params.serial) {
+            trace_ufs_scsi_emulate_vpd_page_80_not_supported();
+            return -1;
+        }
+
+        l = strlen(u->params.serial);
+        if (l > SCSI_INQUIRY_DATA_SIZE) {
+            l = SCSI_INQUIRY_DATA_SIZE;
+        }
+
+        trace_ufs_scsi_emulate_vpd_page_80(req->cmd.xfer);
+        memcpy(outbuf + buflen, u->params.serial, l);
+        buflen += l;
+        break;
+    }
+    case 0x87: /* Mode Page Policy, mandatory */
+    {
+        trace_ufs_scsi_emulate_vpd_page_87(req->cmd.xfer);
+        outbuf[buflen++] = 0x3f; /* apply to all mode pages and subpages */
+        outbuf[buflen++] = 0xff;
+        outbuf[buflen++] = 0; /* shared */
+        outbuf[buflen++] = 0;
+        break;
+    }
+    default:
+        return -1;
+    }
+    /* done with EVPD */
+    assert(buflen - start <= 255);
+    outbuf[start - 1] = buflen - start;
+    return buflen;
+}
+
+static int ufs_scsi_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf,
+                                    uint32_t outbuf_len)
+{
+    int buflen = 0;
+
+    if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
+        return -1;
+    }
+
+    if (req->cmd.buf[1] & 0x1) {
+        /* Vital product data */
+        return ufs_scsi_emulate_vpd_page(req, outbuf, outbuf_len);
+    }
+
+    /* Standard INQUIRY data */
+    if (req->cmd.buf[2] != 0) {
+        return -1;
+    }
+
+    /* PAGE CODE == 0 */
+    buflen = req->cmd.xfer;
+    if (buflen > SCSI_MAX_INQUIRY_LEN) {
+        buflen = SCSI_MAX_INQUIRY_LEN;
+    }
+
+    if (is_wlun(req->lun)) {
+        outbuf[0] = TYPE_WLUN;
+    } else {
+        outbuf[0] = 0;
+    }
+    outbuf[1] = 0;
+
+    strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' ');
+    strpadcpy((char *)&outbuf[8], 8, "QEMU", ' ');
+
+    memset(&outbuf[32], 0, 4);
+
+    outbuf[2] = 0x06; /* SPC-4 */
+    outbuf[3] = 0x2;
+
+    if (buflen > SCSI_INQUIRY_DATA_SIZE) {
+        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
+    } else {
+        /*
+         * If the allocation length of CDB is too small, the additional
+         * length is not adjusted
+         */
+        outbuf[4] = SCSI_INQUIRY_DATA_SIZE - 5;
+    }
+
+    /* Support TCQ.  */
+    outbuf[7] = req->bus->info->tcq ? 0x02 : 0;
+    return buflen;
+}
+
+static int mode_sense_page(UfsLu *lu, int page, uint8_t **p_outbuf,
+                           int page_control)
+{
+    static const int mode_sense_valid[0x3f] = {
+        [MODE_PAGE_CACHING] = 1,
+        [MODE_PAGE_R_W_ERROR] = 1,
+        [MODE_PAGE_CONTROL] = 1,
+    };
+
+    uint8_t *p = *p_outbuf + 2;
+    int length;
+
+    assert(page < ARRAY_SIZE(mode_sense_valid));
+    if ((mode_sense_valid[page]) == 0) {
+        return -1;
+    }
+
+    /*
+     * If Changeable Values are requested, a mask denoting those mode parameters
+     * that are changeable shall be returned. As we currently don't support
+     * parameter changes via MODE_SELECT all bits are returned set to zero.
+     * The buffer was already memset to zero by the caller of this function.
+     */
+    switch (page) {
+    case MODE_PAGE_CACHING:
+        length = 0x12;
+        if (page_control == 1 || /* Changeable Values */
+            blk_enable_write_cache(lu->qdev.conf.blk)) {
+            p[0] = 4; /* WCE */
+        }
+        break;
+
+    case MODE_PAGE_R_W_ERROR:
+        length = 10;
+        if (page_control == 1) { /* Changeable Values */
+            break;
+        }
+        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
+        break;
+
+    case MODE_PAGE_CONTROL:
+        length = 10;
+        if (page_control == 1) { /* Changeable Values */
+            break;
+        }
+        p[1] = 0x10; /* Queue Algorithm modifier */
+        p[8] = 0xff; /* Busy Timeout Period */
+        p[9] = 0xff;
+        break;
+
+    default:
+        return -1;
+    }
+
+    assert(length < 256);
+    (*p_outbuf)[0] = page;
+    (*p_outbuf)[1] = length;
+    *p_outbuf += length + 2;
+    return length + 2;
+}
+
+static int ufs_scsi_emulate_mode_sense(UfsSCSIReq *r, uint8_t *outbuf)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+    bool dbd;
+    int page, buflen, ret, page_control;
+    uint8_t *p;
+    uint8_t dev_specific_param = 0;
+
+    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
+    if (!dbd) {
+        return -1;
+    }
+
+    page = r->req.cmd.buf[2] & 0x3f;
+    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
+
+    trace_ufs_scsi_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
+                                                                          10,
+                                      page, r->req.cmd.xfer, page_control);
+    memset(outbuf, 0, r->req.cmd.xfer);
+    p = outbuf;
+
+    if (!blk_is_writable(lu->qdev.conf.blk)) {
+        dev_specific_param |= 0x80; /* Readonly.  */
+    }
+
+    p[2] = 0; /* Medium type.  */
+    p[3] = dev_specific_param;
+    p[6] = p[7] = 0; /* Block descriptor length.  */
+    p += 8;
+
+    if (page_control == 3) {
+        /* Saved Values */
+        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
+        return -1;
+    }
+
+    if (page == 0x3f) {
+        for (page = 0; page <= 0x3e; page++) {
+            mode_sense_page(lu, page, &p, page_control);
+        }
+    } else {
+        ret = mode_sense_page(lu, page, &p, page_control);
+        if (ret == -1) {
+            return -1;
+        }
+    }
+
+    buflen = p - outbuf;
+    /*
+     * The mode data length field specifies the length in bytes of the
+     * following data that is available to be transferred. The mode data
+     * length does not include itself.
+     */
+    outbuf[0] = ((buflen - 2) >> 8) & 0xff;
+    outbuf[1] = (buflen - 2) & 0xff;
+    return buflen;
+}
+
+/*
+ * scsi_handle_rw_error has two return values.  False means that the error
+ * must be ignored, true means that the error has been processed and the
+ * caller should not do anything else for this request.  Note that
+ * scsi_handle_rw_error always manages its reference counts, independent
+ * of the return value.
+ */
+static bool scsi_handle_rw_error(UfsSCSIReq *r, int ret, bool acct_failed)
+{
+    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+    SCSISense sense = SENSE_CODE(NO_SENSE);
+    int error = 0;
+    bool req_has_sense = false;
+    BlockErrorAction action;
+    int status;
+
+    if (ret < 0) {
+        status = scsi_sense_from_errno(-ret, &sense);
+        error = -ret;
+    } else {
+        /* A passthrough command has completed with nonzero status.  */
+        status = ret;
+        if (status == CHECK_CONDITION) {
+            req_has_sense = true;
+            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
+        } else {
+            error = EINVAL;
+        }
+    }
+
+    /*
+     * Check whether the error has to be handled by the guest or should
+     * rather follow the rerror=/werror= settings.  Guest-handled errors
+     * are usually retried immediately, so do not post them to QMP and
+     * do not account them as failed I/O.
+     */
+    if (req_has_sense && scsi_sense_buf_is_guest_recoverable(
+                             r->req.sense, sizeof(r->req.sense))) {
+        action = BLOCK_ERROR_ACTION_REPORT;
+        acct_failed = false;
+    } else {
+        action = blk_get_error_action(lu->qdev.conf.blk, is_read, error);
+        blk_error_action(lu->qdev.conf.blk, action, is_read, error);
+    }
+
+    switch (action) {
+    case BLOCK_ERROR_ACTION_REPORT:
+        if (acct_failed) {
+            block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+        }
+        if (!req_has_sense && status == CHECK_CONDITION) {
+            scsi_req_build_sense(&r->req, sense);
+        }
+        scsi_req_complete(&r->req, status);
+        return true;
+
+    case BLOCK_ERROR_ACTION_IGNORE:
+        return false;
+
+    case BLOCK_ERROR_ACTION_STOP:
+        scsi_req_retry(&r->req);
+        return true;
+
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static bool ufs_scsi_req_check_error(UfsSCSIReq *r, int ret, bool acct_failed)
+{
+    if (r->req.io_canceled) {
+        scsi_req_cancel_complete(&r->req);
+        return true;
+    }
+
+    if (ret < 0) {
+        return scsi_handle_rw_error(r, ret, acct_failed);
+    }
+
+    return false;
+}
+
+static void scsi_aio_complete(void *opaque, int ret)
+{
+    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb != NULL);
+    r->req.aiocb = NULL;
+    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+    if (ufs_scsi_req_check_error(r, ret, true)) {
+        goto done;
+    }
+
+    block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    scsi_req_complete(&r->req, GOOD);
+
+done:
+    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+    scsi_req_unref(&r->req);
+}
+
+static int32_t ufs_scsi_emulate_command(SCSIRequest *req, uint8_t *buf)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
+    uint32_t last_block = 0;
+    uint8_t *outbuf;
+    int buflen;
+
+    switch (req->cmd.buf[0]) {
+    case INQUIRY:
+    case MODE_SENSE_10:
+    case START_STOP:
+    case REQUEST_SENSE:
+        break;
+
+    default:
+        if (!blk_is_available(lu->qdev.conf.blk)) {
+            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
+            return 0;
+        }
+        break;
+    }
+
+    /*
+     * FIXME: we shouldn't return anything bigger than 4k, but the code
+     * requires the buffer to be as big as req->cmd.xfer in several
+     * places.  So, do not allow CDBs with a very large ALLOCATION
+     * LENGTH.  The real fix would be to modify scsi_read_data and
+     * dma_buf_read, so that they return data beyond the buflen
+     * as all zeros.
+     */
+    if (req->cmd.xfer > 65536) {
+        goto illegal_request;
+    }
+    r->buflen = MAX(4096, req->cmd.xfer);
+
+    if (!r->iov.iov_base) {
+        r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
+    }
+
+    outbuf = r->iov.iov_base;
+    memset(outbuf, 0, r->buflen);
+    switch (req->cmd.buf[0]) {
+    case TEST_UNIT_READY:
+        assert(blk_is_available(lu->qdev.conf.blk));
+        break;
+    case INQUIRY:
+        buflen = ufs_scsi_emulate_inquiry(req, outbuf, r->buflen);
+        if (buflen < 0) {
+            goto illegal_request;
+        }
+        break;
+    case MODE_SENSE_10:
+        buflen = ufs_scsi_emulate_mode_sense(r, outbuf);
+        if (buflen < 0) {
+            goto illegal_request;
+        }
+        break;
+    case READ_CAPACITY_10:
+        /* The normal LEN field for this command is zero.  */
+        memset(outbuf, 0, 8);
+        if (lu->qdev.max_lba > 0) {
+            last_block = lu->qdev.max_lba - 1;
+        };
+        outbuf[0] = (last_block >> 24) & 0xff;
+        outbuf[1] = (last_block >> 16) & 0xff;
+        outbuf[2] = (last_block >> 8) & 0xff;
+        outbuf[3] = last_block & 0xff;
+        outbuf[4] = (lu->qdev.blocksize >> 24) & 0xff;
+        outbuf[5] = (lu->qdev.blocksize >> 16) & 0xff;
+        outbuf[6] = (lu->qdev.blocksize >> 8) & 0xff;
+        outbuf[7] = lu->qdev.blocksize & 0xff;
+        break;
+    case REQUEST_SENSE:
+        /* Just return "NO SENSE".  */
+        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
+                                    (req->cmd.buf[1] & 1) == 0);
+        if (buflen < 0) {
+            goto illegal_request;
+        }
+        break;
+    case SYNCHRONIZE_CACHE:
+        /* The request is used as the AIO opaque value, so add a ref.  */
+        scsi_req_ref(&r->req);
+        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
+        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
+        return 0;
+    case VERIFY_10:
+        trace_ufs_scsi_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
+        if (req->cmd.buf[1] & 6) {
+            goto illegal_request;
+        }
+        break;
+    case SERVICE_ACTION_IN_16:
+        /* Service Action In subcommands. */
+        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
+            trace_ufs_scsi_emulate_command_SAI_16();
+            memset(outbuf, 0, req->cmd.xfer);
+
+            if (lu->qdev.max_lba > 0) {
+                last_block = lu->qdev.max_lba - 1;
+            };
+            outbuf[0] = 0;
+            outbuf[1] = 0;
+            outbuf[2] = 0;
+            outbuf[3] = 0;
+            outbuf[4] = (last_block >> 24) & 0xff;
+            outbuf[5] = (last_block >> 16) & 0xff;
+            outbuf[6] = (last_block >> 8) & 0xff;
+            outbuf[7] = last_block & 0xff;
+            outbuf[8] = (lu->qdev.blocksize >> 24) & 0xff;
+            outbuf[9] = (lu->qdev.blocksize >> 16) & 0xff;
+            outbuf[10] = (lu->qdev.blocksize >> 8) & 0xff;
+            outbuf[11] = lu->qdev.blocksize & 0xff;
+            outbuf[12] = 0;
+            outbuf[13] = get_physical_block_exp(&lu->qdev.conf);
+
+            if (lu->unit_desc.provisioning_type == 2 ||
+                lu->unit_desc.provisioning_type == 3) {
+                outbuf[14] = 0x80;
+            }
+            /* Protection, exponent and lowest lba field left blank. */
+            break;
+        }
+        trace_ufs_scsi_emulate_command_SAI_unsupported();
+        goto illegal_request;
+    case MODE_SELECT_10:
+        trace_ufs_scsi_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
+        break;
+    case START_STOP:
+        /*
+         * TODO: START_STOP is not yet implemented. It always returns success.
+         * Revisit it when ufs power management is implemented.
+         */
+        trace_ufs_scsi_emulate_command_START_STOP();
+        break;
+    case FORMAT_UNIT:
+        trace_ufs_scsi_emulate_command_FORMAT_UNIT();
+        break;
+    case SEND_DIAGNOSTIC:
+        trace_ufs_scsi_emulate_command_SEND_DIAGNOSTIC();
+        break;
+    default:
+        trace_ufs_scsi_emulate_command_UNKNOWN(buf[0],
+                                               scsi_command_name(buf[0]));
+        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
+        return 0;
+    }
+    assert(!r->req.aiocb);
+    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
+    if (r->iov.iov_len == 0) {
+        scsi_req_complete(&r->req, GOOD);
+    }
+    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+        assert(r->iov.iov_len == req->cmd.xfer);
+        return -r->iov.iov_len;
+    } else {
+        return r->iov.iov_len;
+    }
+
+illegal_request:
+    if (r->req.status == -1) {
+        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+    }
+    return 0;
+}
+
+static void ufs_scsi_emulate_read_data(SCSIRequest *req)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+    int buflen = r->iov.iov_len;
+
+    if (buflen) {
+        trace_ufs_scsi_emulate_read_data(buflen);
+        r->iov.iov_len = 0;
+        r->started = true;
+        scsi_req_data(&r->req, buflen);
+        return;
+    }
+
+    /* This also clears the sense buffer for REQUEST SENSE.  */
+    scsi_req_complete(&r->req, GOOD);
+}
+
+static int ufs_scsi_check_mode_select(UfsLu *lu, int page, uint8_t *inbuf,
+                                      int inlen)
+{
+    uint8_t mode_current[SCSI_MAX_MODE_LEN];
+    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
+    uint8_t *p;
+    int len, expected_len, changeable_len, i;
+
+    /*
+     * The input buffer does not include the page header, so it is
+     * off by 2 bytes.
+     */
+    expected_len = inlen + 2;
+    if (expected_len > SCSI_MAX_MODE_LEN) {
+        return -1;
+    }
+
+    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
+    if (page == MODE_PAGE_ALLS) {
+        return -1;
+    }
+
+    p = mode_current;
+    memset(mode_current, 0, inlen + 2);
+    len = mode_sense_page(lu, page, &p, 0);
+    if (len < 0 || len != expected_len) {
+        return -1;
+    }
+
+    p = mode_changeable;
+    memset(mode_changeable, 0, inlen + 2);
+    changeable_len = mode_sense_page(lu, page, &p, 1);
+    assert(changeable_len == len);
+
+    /*
+     * Check that unchangeable bits are the same as what MODE SENSE
+     * would return.
+     */
+    for (i = 2; i < len; i++) {
+        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
+            return -1;
+        }
+    }
+    return 0;
+}
+
+static void ufs_scsi_apply_mode_select(UfsLu *lu, int page, uint8_t *p)
+{
+    switch (page) {
+    case MODE_PAGE_CACHING:
+        blk_set_enable_write_cache(lu->qdev.conf.blk, (p[0] & 4) != 0);
+        break;
+
+    default:
+        break;
+    }
+}
+
+static int mode_select_pages(UfsSCSIReq *r, uint8_t *p, int len, bool change)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    while (len > 0) {
+        int page, page_len;
+
+        page = p[0] & 0x3f;
+        if (p[0] & 0x40) {
+            goto invalid_param;
+        } else {
+            if (len < 2) {
+                goto invalid_param_len;
+            }
+            page_len = p[1];
+            p += 2;
+            len -= 2;
+        }
+
+        if (page_len > len) {
+            goto invalid_param_len;
+        }
+
+        if (!change) {
+            if (ufs_scsi_check_mode_select(lu, page, p, page_len) < 0) {
+                goto invalid_param;
+            }
+        } else {
+            ufs_scsi_apply_mode_select(lu, page, p);
+        }
+
+        p += page_len;
+        len -= page_len;
+    }
+    return 0;
+
+invalid_param:
+    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
+    return -1;
+
+invalid_param_len:
+    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
+    return -1;
+}
+
+static void ufs_scsi_emulate_mode_select(UfsSCSIReq *r, uint8_t *inbuf)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+    uint8_t *p = inbuf;
+    int len = r->req.cmd.xfer;
+    int hdr_len = 8;
+    int bd_len;
+    int pass;
+
+    /* We only support PF=1, SP=0.  */
+    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
+        goto invalid_field;
+    }
+
+    if (len < hdr_len) {
+        goto invalid_param_len;
+    }
+
+    bd_len = lduw_be_p(&p[6]);
+    if (bd_len != 0) {
+        goto invalid_param;
+    }
+
+    len -= hdr_len;
+    p += hdr_len;
+
+    /* Ensure no change is made if there is an error!  */
+    for (pass = 0; pass < 2; pass++) {
+        if (mode_select_pages(r, p, len, pass == 1) < 0) {
+            assert(pass == 0);
+            return;
+        }
+    }
+
+    if (!blk_enable_write_cache(lu->qdev.conf.blk)) {
+        /* The request is used as the AIO opaque value, so add a ref.  */
+        scsi_req_ref(&r->req);
+        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
+        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
+        return;
+    }
+
+    scsi_req_complete(&r->req, GOOD);
+    return;
+
+invalid_param:
+    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
+    return;
+
+invalid_param_len:
+    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
+    return;
+
+invalid_field:
+    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+}
+
+/* block_num and nb_blocks expected to be in qdev blocksize */
+static inline bool check_lba_range(UfsLu *lu, uint64_t block_num,
+                                   uint32_t nb_blocks)
+{
+    /*
+     * The first line tests that no overflow happens when computing the last
+     * block.  The second line tests that the last accessed block is in
+     * range.
+     *
+     * Careful, the computations should not underflow for nb_blocks == 0,
+     * and a 0-block read to the first LBA beyond the end of device is
+     * valid.
+     */
+    return (block_num <= block_num + nb_blocks &&
+            block_num + nb_blocks <= lu->qdev.max_lba + 1);
+}
+
+static void ufs_scsi_emulate_write_data(SCSIRequest *req)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+
+    if (r->iov.iov_len) {
+        int buflen = r->iov.iov_len;
+        trace_ufs_scsi_emulate_write_data(buflen);
+        r->iov.iov_len = 0;
+        scsi_req_data(&r->req, buflen);
+        return;
+    }
+
+    switch (req->cmd.buf[0]) {
+    case MODE_SELECT_10:
+        /* This also clears the sense buffer for REQUEST SENSE.  */
+        ufs_scsi_emulate_mode_select(r, r->iov.iov_base);
+        break;
+    default:
+        abort();
+    }
+}
+
+/* Return a pointer to the data buffer.  */
+static uint8_t *ufs_scsi_get_buf(SCSIRequest *req)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+
+    return (uint8_t *)r->iov.iov_base;
+}
+
+static int32_t ufs_scsi_dma_command(SCSIRequest *req, uint8_t *buf)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
+    uint32_t len;
+    uint8_t command;
+
+    command = buf[0];
+
+    if (!blk_is_available(lu->qdev.conf.blk)) {
+        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
+        return 0;
+    }
+
+    len = scsi_data_cdb_xfer(r->req.cmd.buf);
+    switch (command) {
+    case READ_6:
+    case READ_10:
+        trace_ufs_scsi_dma_command_READ(r->req.cmd.lba, len);
+        if (r->req.cmd.buf[1] & 0xe0) {
+            goto illegal_request;
+        }
+        if (!check_lba_range(lu, r->req.cmd.lba, len)) {
+            goto illegal_lba;
+        }
+        r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+        r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+        break;
+    case WRITE_6:
+    case WRITE_10:
+        trace_ufs_scsi_dma_command_WRITE(r->req.cmd.lba, len);
+        if (!blk_is_writable(lu->qdev.conf.blk)) {
+            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
+            return 0;
+        }
+        if (r->req.cmd.buf[1] & 0xe0) {
+            goto illegal_request;
+        }
+        if (!check_lba_range(lu, r->req.cmd.lba, len)) {
+            goto illegal_lba;
+        }
+        r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+        r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+        break;
+    default:
+        abort();
+    illegal_request:
+        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+        return 0;
+    illegal_lba:
+        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
+        return 0;
+    }
+    r->need_fua_emulation = ((r->req.cmd.buf[1] & 8) != 0);
+    if (r->sector_count == 0) {
+        scsi_req_complete(&r->req, GOOD);
+    }
+    assert(r->iov.iov_len == 0);
+    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+        return -r->sector_count * BDRV_SECTOR_SIZE;
+    } else {
+        return r->sector_count * BDRV_SECTOR_SIZE;
+    }
+}
+
+static void scsi_write_do_fua(UfsSCSIReq *r)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb == NULL);
+    assert(!r->req.io_canceled);
+
+    if (r->need_fua_emulation) {
+        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
+        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
+        return;
+    }
+
+    scsi_req_complete(&r->req, GOOD);
+    scsi_req_unref(&r->req);
+}
+
+static void scsi_dma_complete_noio(UfsSCSIReq *r, int ret)
+{
+    assert(r->req.aiocb == NULL);
+    if (ufs_scsi_req_check_error(r, ret, false)) {
+        goto done;
+    }
+
+    r->sector += r->sector_count;
+    r->sector_count = 0;
+    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+        scsi_write_do_fua(r);
+        return;
+    } else {
+        scsi_req_complete(&r->req, GOOD);
+    }
+
+done:
+    scsi_req_unref(&r->req);
+}
+
+static void scsi_dma_complete(void *opaque, int ret)
+{
+    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb != NULL);
+    r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+    if (ret < 0) {
+        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    } else {
+        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    }
+    scsi_dma_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
+                                  BlockCompletionFunc *cb, void *cb_opaque,
+                                  void *opaque)
+{
+    UfsSCSIReq *r = opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+    return blk_aio_preadv(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
+}
+
+static void scsi_init_iovec(UfsSCSIReq *r, size_t size)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    if (!r->iov.iov_base) {
+        r->buflen = size;
+        r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
+    }
+    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
+    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
+}
+
+static void scsi_read_complete_noio(UfsSCSIReq *r, int ret)
+{
+    uint32_t n;
+
+    assert(r->req.aiocb == NULL);
+    if (ufs_scsi_req_check_error(r, ret, false)) {
+        goto done;
+    }
+
+    n = r->qiov.size / BDRV_SECTOR_SIZE;
+    r->sector += n;
+    r->sector_count -= n;
+    scsi_req_data(&r->req, r->qiov.size);
+
+done:
+    scsi_req_unref(&r->req);
+}
+
+static void scsi_read_complete(void *opaque, int ret)
+{
+    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb != NULL);
+    r->req.aiocb = NULL;
+    trace_ufs_scsi_read_data_count(r->sector_count);
+    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+    if (ret < 0) {
+        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    } else {
+        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+        trace_ufs_scsi_read_complete(r->req.tag, r->qiov.size);
+    }
+    scsi_read_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+/* Actually issue a read to the block device.  */
+static void scsi_do_read(UfsSCSIReq *r, int ret)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb == NULL);
+    if (ufs_scsi_req_check_error(r, ret, false)) {
+        goto done;
+    }
+
+    /* The request is used as the AIO opaque value, so add a ref.  */
+    scsi_req_ref(&r->req);
+
+    if (r->req.sg) {
+        dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
+        r->req.residual -= r->req.sg->size;
+        r->req.aiocb = dma_blk_io(
+            blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
+            r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_readv, r,
+            scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE);
+    } else {
+        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
+        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
+                         r->qiov.size, BLOCK_ACCT_READ);
+        r->req.aiocb = scsi_dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
+                                      scsi_read_complete, r, r);
+    }
+
+done:
+    scsi_req_unref(&r->req);
+}
+
+static void scsi_do_read_cb(void *opaque, int ret)
+{
+    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb != NULL);
+    r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+    if (ret < 0) {
+        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    } else {
+        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    }
+    scsi_do_read(opaque, ret);
+    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+/* Read more data from scsi device into buffer.  */
+static void scsi_read_data(SCSIRequest *req)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+    bool first;
+
+    trace_ufs_scsi_read_data_count(r->sector_count);
+    if (r->sector_count == 0) {
+        /* This also clears the sense buffer for REQUEST SENSE.  */
+        scsi_req_complete(&r->req, GOOD);
+        return;
+    }
+
+    /* No data transfer may already be in progress */
+    assert(r->req.aiocb == NULL);
+
+    /* The request is used as the AIO opaque value, so add a ref.  */
+    scsi_req_ref(&r->req);
+    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+        trace_ufs_scsi_read_data_invalid();
+        scsi_read_complete_noio(r, -EINVAL);
+        return;
+    }
+
+    if (!blk_is_available(req->dev->conf.blk)) {
+        scsi_read_complete_noio(r, -ENOMEDIUM);
+        return;
+    }
+
+    first = !r->started;
+    r->started = true;
+    if (first && r->need_fua_emulation) {
+        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+                         BLOCK_ACCT_FLUSH);
+        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_do_read_cb, r);
+    } else {
+        scsi_do_read(r, 0);
+    }
+}
+
+static void scsi_write_complete_noio(UfsSCSIReq *r, int ret)
+{
+    uint32_t n;
+
+    assert(r->req.aiocb == NULL);
+    if (ufs_scsi_req_check_error(r, ret, false)) {
+        goto done;
+    }
+
+    n = r->qiov.size / BDRV_SECTOR_SIZE;
+    r->sector += n;
+    r->sector_count -= n;
+    if (r->sector_count == 0) {
+        scsi_write_do_fua(r);
+        return;
+    } else {
+        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
+        trace_ufs_scsi_write_complete_noio(r->req.tag, r->qiov.size);
+        scsi_req_data(&r->req, r->qiov.size);
+    }
+
+done:
+    scsi_req_unref(&r->req);
+}
+
+static void scsi_write_complete(void *opaque, int ret)
+{
+    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    assert(r->req.aiocb != NULL);
+    r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+    if (ret < 0) {
+        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    } else {
+        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+    }
+    scsi_write_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
+                                   BlockCompletionFunc *cb, void *cb_opaque,
+                                   void *opaque)
+{
+    UfsSCSIReq *r = opaque;
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+    return blk_aio_pwritev(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
+}
+
+static void scsi_write_data(SCSIRequest *req)
+{
+    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+    /* No data transfer may already be in progress */
+    assert(r->req.aiocb == NULL);
+
+    /* The request is used as the AIO opaque value, so add a ref.  */
+    scsi_req_ref(&r->req);
+    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
+        trace_ufs_scsi_write_data_invalid();
+        scsi_write_complete_noio(r, -EINVAL);
+        return;
+    }
+
+    if (!r->req.sg && !r->qiov.size) {
+        /* Called for the first time.  Ask the driver to send us more data.  */
+        r->started = true;
+        scsi_write_complete_noio(r, 0);
+        return;
+    }
+    if (!blk_is_available(req->dev->conf.blk)) {
+        scsi_write_complete_noio(r, -ENOMEDIUM);
+        return;
+    }
+
+    if (r->req.sg) {
+        dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg,
+                       BLOCK_ACCT_WRITE);
+        r->req.residual -= r->req.sg->size;
+        r->req.aiocb = dma_blk_io(
+            blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
+            r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_writev, r,
+            scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE);
+    } else {
+        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
+                         r->qiov.size, BLOCK_ACCT_WRITE);
+        r->req.aiocb = scsi_dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
+                                       scsi_write_complete, r, r);
+    }
+}
+
+static const SCSIReqOps ufs_scsi_emulate_reqops = {
+    .size = sizeof(UfsSCSIReq),
+    .free_req = ufs_scsi_free_request,
+    .send_command = ufs_scsi_emulate_command,
+    .read_data = ufs_scsi_emulate_read_data,
+    .write_data = ufs_scsi_emulate_write_data,
+    .get_buf = ufs_scsi_get_buf,
+};
+
+static const SCSIReqOps ufs_scsi_dma_reqops = {
+    .size = sizeof(UfsSCSIReq),
+    .free_req = ufs_scsi_free_request,
+    .send_command = ufs_scsi_dma_command,
+    .read_data = scsi_read_data,
+    .write_data = scsi_write_data,
+    .get_buf = ufs_scsi_get_buf,
+};
+
+/*
+ * Following commands are not yet supported
+ * PRE_FETCH(10),
+ * UNMAP,
+ * WRITE_BUFFER, READ_BUFFER,
+ * SECURITY_PROTOCOL_IN, SECURITY_PROTOCOL_OUT
+ */
+static const SCSIReqOps *const ufs_scsi_reqops_dispatch[256] = {
+    [TEST_UNIT_READY] = &ufs_scsi_emulate_reqops,
+    [INQUIRY] = &ufs_scsi_emulate_reqops,
+    [MODE_SENSE_10] = &ufs_scsi_emulate_reqops,
+    [START_STOP] = &ufs_scsi_emulate_reqops,
+    [READ_CAPACITY_10] = &ufs_scsi_emulate_reqops,
+    [REQUEST_SENSE] = &ufs_scsi_emulate_reqops,
+    [SYNCHRONIZE_CACHE] = &ufs_scsi_emulate_reqops,
+    [MODE_SELECT_10] = &ufs_scsi_emulate_reqops,
+    [VERIFY_10] = &ufs_scsi_emulate_reqops,
+    [FORMAT_UNIT] = &ufs_scsi_emulate_reqops,
+    [SERVICE_ACTION_IN_16] = &ufs_scsi_emulate_reqops,
+    [SEND_DIAGNOSTIC] = &ufs_scsi_emulate_reqops,
+
+    [READ_6] = &ufs_scsi_dma_reqops,
+    [READ_10] = &ufs_scsi_dma_reqops,
+    [WRITE_6] = &ufs_scsi_dma_reqops,
+    [WRITE_10] = &ufs_scsi_dma_reqops,
+};
+
+static SCSIRequest *scsi_new_request(SCSIDevice *dev, uint32_t tag,
+                                     uint32_t lun, uint8_t *buf,
+                                     void *hba_private)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
+    SCSIRequest *req;
+    const SCSIReqOps *ops;
+    uint8_t command;
+
+    command = buf[0];
+    ops = ufs_scsi_reqops_dispatch[command];
+    if (!ops) {
+        ops = &ufs_scsi_emulate_reqops;
+    }
+    req = scsi_req_alloc(ops, &lu->qdev, tag, lun, hba_private);
+
+    return req;
+}
+
+static Property ufs_lu_props[] = {
+    DEFINE_PROP_DRIVE("drive", UfsLu, qdev.conf.blk),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static bool ufs_lu_brdv_init(UfsLu *lu, Error **errp)
+{
+    SCSIDevice *dev = &lu->qdev;
+    bool read_only;
+
+    if (!lu->qdev.conf.blk) {
+        error_setg(errp, "drive property not set");
+        return false;
+    }
+
+    if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
+        return false;
+    }
+
+    if (blk_get_aio_context(lu->qdev.conf.blk) != qemu_get_aio_context() &&
+        !lu->qdev.hba_supports_iothread) {
+        error_setg(errp, "HBA does not support iothreads");
+        return false;
+    }
+
+    read_only = !blk_supports_write_perm(lu->qdev.conf.blk);
+
+    if (!blkconf_apply_backend_options(&dev->conf, read_only,
+                                       dev->type == TYPE_DISK, errp)) {
+        return false;
+    }
+
+    if (blk_is_sg(lu->qdev.conf.blk)) {
+        error_setg(errp, "unwanted /dev/sg*");
+        return false;
+    }
+
+    blk_iostatus_enable(lu->qdev.conf.blk);
+    return true;
+}
+
+static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp)
+{
+    BlockBackend *blk = lu->qdev.conf.blk;
+    int64_t brdv_len = blk_getlength(blk);
+    uint64_t raw_dev_cap =
+        be64_to_cpu(u->geometry_desc.total_raw_device_capacity);
+
+    if (u->device_desc.number_lu >= UFS_MAX_LUS) {
+        error_setg(errp, "ufs host controller has too many logical units.");
+        return false;
+    }
+
+    if (u->lus[lu->lun] != NULL) {
+        error_setg(errp, "ufs logical unit %d already exists.", lu->lun);
+        return false;
+    }
+
+    u->lus[lu->lun] = lu;
+    u->device_desc.number_lu++;
+    raw_dev_cap += (brdv_len >> UFS_GEOMETRY_CAPACITY_SHIFT);
+    u->geometry_desc.total_raw_device_capacity = cpu_to_be64(raw_dev_cap);
+    return true;
+}
+
+static inline uint8_t ufs_log2(uint64_t input)
+{
+    int log = 0;
+    while (input >>= 1) {
+        log++;
+    }
+    return log;
+}
+
+static void ufs_init_lu(UfsLu *lu)
+{
+    BlockBackend *blk = lu->qdev.conf.blk;
+    int64_t brdv_len = blk_getlength(blk);
+
+    lu->lun = lu->qdev.lun;
+    memset(&lu->unit_desc, 0, sizeof(lu->unit_desc));
+    lu->unit_desc.length = sizeof(UnitDescriptor);
+    lu->unit_desc.descriptor_idn = UFS_QUERY_DESC_IDN_UNIT;
+    lu->unit_desc.lu_enable = 0x01;
+    lu->unit_desc.logical_block_size = ufs_log2(lu->qdev.blocksize);
+    lu->unit_desc.unit_index = lu->qdev.lun;
+    lu->unit_desc.logical_block_count =
+        cpu_to_be64(brdv_len / (1 << lu->unit_desc.logical_block_size));
+}
+
+static bool ufs_lu_check_constraints(UfsLu *lu, Error **errp)
+{
+    if (!lu->qdev.conf.blk) {
+        error_setg(errp, "drive property not set");
+        return false;
+    }
+
+    if (lu->qdev.channel != 0) {
+        error_setg(errp, "ufs logical unit does not support channel");
+        return false;
+    }
+
+    if (lu->qdev.lun >= UFS_MAX_LUS) {
+        error_setg(errp, "lun must be between 1 and %d", UFS_MAX_LUS - 1);
+        return false;
+    }
+
+    return true;
+}
+
+static void ufs_lu_realize(SCSIDevice *dev, Error **errp)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
+    BusState *s = qdev_get_parent_bus(&dev->qdev);
+    UfsHc *u = UFS(s->parent);
+    AioContext *ctx = NULL;
+    uint64_t nb_sectors, nb_blocks;
+
+    if (!ufs_lu_check_constraints(lu, errp)) {
+        return;
+    }
+
+    if (lu->qdev.conf.blk) {
+        ctx = blk_get_aio_context(lu->qdev.conf.blk);
+        aio_context_acquire(ctx);
+        if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
+            goto out;
+        }
+    }
+    lu->qdev.blocksize = UFS_BLOCK_SIZE;
+    blk_get_geometry(lu->qdev.conf.blk, &nb_sectors);
+    nb_blocks = nb_sectors / (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+    if (nb_blocks > UINT32_MAX) {
+        nb_blocks = UINT32_MAX;
+    }
+    lu->qdev.max_lba = nb_blocks;
+    lu->qdev.type = TYPE_DISK;
+
+    ufs_init_lu(lu);
+    if (!ufs_add_lu(u, lu, errp)) {
+        goto out;
+    }
+
+    ufs_lu_brdv_init(lu, errp);
+out:
+    if (ctx) {
+        aio_context_release(ctx);
+    }
+}
+
+static void ufs_lu_unrealize(SCSIDevice *dev)
+{
+    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
+
+    blk_drain(lu->qdev.conf.blk);
+}
+
+static void ufs_wlu_realize(DeviceState *qdev, Error **errp)
+{
+    UfsWLu *wlu = UFSWLU(qdev);
+    SCSIDevice *dev = &wlu->qdev;
+
+    if (!is_wlun(dev->lun)) {
+        error_setg(errp, "not well-known logical unit number");
+        return;
+    }
+
+    QTAILQ_INIT(&dev->requests);
+}
+
+static void ufs_lu_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);
+
+    sc->realize = ufs_lu_realize;
+    sc->unrealize = ufs_lu_unrealize;
+    sc->alloc_req = scsi_new_request;
+    dc->bus_type = TYPE_UFS_BUS;
+    device_class_set_props(dc, ufs_lu_props);
+    dc->desc = "Virtual UFS logical unit";
+}
+
+static void ufs_wlu_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);
+
+    /*
+     * The realize() function of TYPE_SCSI_DEVICE causes a segmentation fault
+     * if a block drive does not exist. Define a new realize function for
+     * well-known LUs that do not have a block drive.
+     */
+    dc->realize = ufs_wlu_realize;
+    sc->alloc_req = scsi_new_request;
+    dc->bus_type = TYPE_UFS_BUS;
+    dc->desc = "Virtual UFS well-known logical unit";
+}
+
+static const TypeInfo ufs_lu_info = {
+    .name = TYPE_UFS_LU,
+    .parent = TYPE_SCSI_DEVICE,
+    .class_init = ufs_lu_class_init,
+    .instance_size = sizeof(UfsLu),
+};
+
+static const TypeInfo ufs_wlu_info = {
+    .name = TYPE_UFS_WLU,
+    .parent = TYPE_SCSI_DEVICE,
+    .class_init = ufs_wlu_class_init,
+    .instance_size = sizeof(UfsWLu),
+};
+
+static void ufs_lu_register_types(void)
+{
+    type_register_static(&ufs_lu_info);
+    type_register_static(&ufs_wlu_info);
+}
+
+type_init(ufs_lu_register_types)
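One detail in lu.c worth a worked example is check_lba_range() above, which folds an overflow guard and a bounds check into a single expression. The numbers below are hypothetical, chosen only to exercise each branch of the logic shown in the hunk:

/* Illustration only: hypothetical values exercising the check_lba_range()
 * expression from lu.c above, assuming a LU with max_lba == 999
 * (i.e. 1000 blocks). The helper restates the expression with plain
 * integers so each case can be read off directly. */
#include <stdbool.h>
#include <stdint.h>

static bool lba_in_range(uint64_t max_lba, uint64_t block_num,
                         uint32_t nb_blocks)
{
    return block_num <= block_num + nb_blocks &&   /* no wraparound */
           block_num + nb_blocks <= max_lba + 1;   /* last block in range */
}

/* lba_in_range(999, 990, 10)       -> true  (blocks 990..999)
 * lba_in_range(999, 990, 11)       -> false (block 1000 is out of range)
 * lba_in_range(999, UINT64_MAX, 2) -> false (sum wraps; first test fails)
 * lba_in_range(999, 1000, 0)       -> true  (0-block read just past end,
 *                                            explicitly allowed above) */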
diff --git a/hw/ufs/meson.build b/hw/ufs/meson.build
new file mode 100644 (file)
index 0000000..6e68328
--- /dev/null
@@ -0,0 +1 @@
+system_ss.add(when: 'CONFIG_UFS_PCI', if_true: files('ufs.c', 'lu.c'))
diff --git a/hw/ufs/trace-events b/hw/ufs/trace-events
new file mode 100644 (file)
index 0000000..1e55fb0
--- /dev/null
@@ -0,0 +1,58 @@
+# ufs.c
+ufs_irq_raise(void) "INTx"
+ufs_irq_lower(void) "INTx"
+ufs_mmio_read(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d"
+ufs_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d"
+ufs_process_db(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_process_req(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_complete_req(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_sendback_req(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_exec_nop_cmd(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_exec_scsi_cmd(uint32_t slot, uint8_t lun, uint8_t opcode) "slot %"PRIu32", lun 0x%"PRIx8", opcode 0x%"PRIx8""
+ufs_exec_query_cmd(uint32_t slot, uint8_t opcode) "slot %"PRIu32", opcode 0x%"PRIx8""
+ufs_process_uiccmd(uint32_t uiccmd, uint32_t ucmdarg1, uint32_t ucmdarg2, uint32_t ucmdarg3) "uiccmd 0x%"PRIx32", ucmdarg1 0x%"PRIx32", ucmdarg2 0x%"PRIx32", ucmdarg3 0x%"PRIx32""
+
+# lu.c
+ufs_scsi_check_condition(uint32_t tag, uint8_t key, uint8_t asc, uint8_t ascq) "Command complete tag=0x%x sense=%d/%d/%d"
+ufs_scsi_read_complete(uint32_t tag, size_t size) "Data ready tag=0x%x len=%zd"
+ufs_scsi_read_data_count(uint32_t sector_count) "Read sector_count=%d"
+ufs_scsi_read_data_invalid(void) "Data transfer direction invalid"
+ufs_scsi_write_complete_noio(uint32_t tag, size_t size) "Write complete tag=0x%x more=%zd"
+ufs_scsi_write_data_invalid(void) "Data transfer direction invalid"
+ufs_scsi_emulate_vpd_page_00(size_t xfer) "Inquiry EVPD[Supported pages] buffer size %zd"
+ufs_scsi_emulate_vpd_page_80_not_supported(void) "Inquiry EVPD[Serial number] not supported"
+ufs_scsi_emulate_vpd_page_80(size_t xfer) "Inquiry EVPD[Serial number] buffer size %zd"
+ufs_scsi_emulate_vpd_page_87(size_t xfer) "Inquiry EVPD[Mode Page Policy] buffer size %zd"
+ufs_scsi_emulate_mode_sense(int cmd, int page, size_t xfer, int control) "Mode Sense(%d) (page %d, xfer %zd, page_control %d)"
+ufs_scsi_emulate_read_data(int buflen) "Read buf_len=%d"
+ufs_scsi_emulate_write_data(int buflen) "Write buf_len=%d"
+ufs_scsi_emulate_command_START_STOP(void) "START STOP UNIT"
+ufs_scsi_emulate_command_FORMAT_UNIT(void) "FORMAT UNIT"
+ufs_scsi_emulate_command_SEND_DIAGNOSTIC(void) "SEND DIAGNOSTIC"
+ufs_scsi_emulate_command_SAI_16(void) "SAI READ CAPACITY(16)"
+ufs_scsi_emulate_command_SAI_unsupported(void) "Unsupported Service Action In"
+ufs_scsi_emulate_command_MODE_SELECT_10(size_t xfer) "Mode Select(10) (len %zd)"
+ufs_scsi_emulate_command_VERIFY(int bytchk) "Verify (bytchk %d)"
+ufs_scsi_emulate_command_UNKNOWN(int cmd, const char *name) "Unknown SCSI command (0x%2.2x=%s)"
+ufs_scsi_dma_command_READ(uint64_t lba, uint32_t len) "Read (block %" PRIu64 ", count %u)"
+ufs_scsi_dma_command_WRITE(uint64_t lba, uint32_t len) "Write (block %" PRIu64 ", count %u)"
+
+# error condition
+ufs_err_dma_read_utrd(uint32_t slot, uint64_t addr) "failed to read utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64""
+ufs_err_dma_read_req_upiu(uint32_t slot, uint64_t addr) "failed to read req upiu. UTRLDBR slot %"PRIu32", request upiu addr %"PRIu64""
+ufs_err_dma_read_prdt(uint32_t slot, uint64_t addr) "failed to read prdt. UTRLDBR slot %"PRIu32", prdt addr %"PRIu64""
+ufs_err_dma_write_utrd(uint32_t slot, uint64_t addr) "failed to write utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64""
+ufs_err_dma_write_rsp_upiu(uint32_t slot, uint64_t addr) "failed to write rsp upiu. UTRLDBR slot %"PRIu32", response upiu addr %"PRIu64""
+ufs_err_utrl_slot_error(uint32_t slot) "UTRLDBR slot %"PRIu32" is in error"
+ufs_err_utrl_slot_busy(uint32_t slot) "UTRLDBR slot %"PRIu32" is busy"
+ufs_err_unsupport_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is not yet supported"
+ufs_err_invalid_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is invalid"
+ufs_err_scsi_cmd_invalid_lun(uint8_t lun) "scsi command has invalid lun: 0x%"PRIx8""
+ufs_err_query_flag_not_readable(uint8_t idn) "query flag idn 0x%"PRIx8" is denied to read"
+ufs_err_query_flag_not_writable(uint8_t idn) "query flag idn 0x%"PRIx8" is denied to write"
+ufs_err_query_attr_not_readable(uint8_t idn) "query attribute idn 0x%"PRIx8" is denied to read"
+ufs_err_query_attr_not_writable(uint8_t idn) "query attribute idn 0x%"PRIx8" is denied to write"
+ufs_err_query_invalid_opcode(uint8_t opcode) "query request has invalid opcode. opcode: 0x%"PRIx8""
+ufs_err_query_invalid_idn(uint8_t opcode, uint8_t idn) "query request has invalid idn. opcode: 0x%"PRIx8", idn 0x%"PRIx8""
+ufs_err_query_invalid_index(uint8_t opcode, uint8_t index) "query request has invalid index. opcode: 0x%"PRIx8", index 0x%"PRIx8""
+ufs_err_invalid_trans_code(uint32_t slot, uint8_t trans_code) "request upiu has invalid transaction code. slot: %"PRIu32", trans_code: 0x%"PRIx8""
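Each line in this trace-events file is compiled by QEMU's tracetool into a trace_<name>() helper with the declared argument types, which is what lu.c and ufs.c call directly (for example trace_ufs_scsi_check_condition() earlier in this diff). A rough approximation of what gets generated for one of these lines, simplified to a stderr-logging backend:

#include <stdint.h>
#include <stdio.h>

/* Approximation only: the real helper is generated into
 * "trace/trace-hw_ufs.h" (included by hw/ufs/trace.h below) and
 * dispatches to the configured trace backends; this sketch hard-codes
 * a log backend for illustration. */
static inline void trace_ufs_exec_scsi_cmd(uint32_t slot, uint8_t lun,
                                           uint8_t opcode)
{
    fprintf(stderr, "ufs_exec_scsi_cmd slot %u, lun 0x%x, opcode 0x%x\n",
            slot, lun, opcode);
}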
diff --git a/hw/ufs/trace.h b/hw/ufs/trace.h
new file mode 100644 (file)
index 0000000..2dbd639
--- /dev/null
@@ -0,0 +1 @@
+#include "trace/trace-hw_ufs.h"
diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c
new file mode 100644 (file)
index 0000000..0ecedb9
--- /dev/null
@@ -0,0 +1,1502 @@
+/*
+ * QEMU Universal Flash Storage (UFS) Controller
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Written by Jeuk Kim <jeuk20.kim@samsung.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+/**
+ * Reference Specs: https://www.jedec.org/, 3.1
+ *
+ * Usage
+ * -----
+ *
+ * Add options:
+ *      -drive file=<file>,if=none,id=<drive_id>
+ *      -device ufs,serial=<serial>,id=<bus_name>, \
+ *              nutrs=<N[optional]>,nutmrs=<N[optional]>
+ *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+#include "ufs.h"
+
+/* The QEMU-UFS device follows spec version 3.1 */
+#define UFS_SPEC_VER 0x0310
+#define UFS_MAX_NUTRS 32
+#define UFS_MAX_NUTMRS 8
+
+static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
+{
+    hwaddr hi = addr + size - 1;
+
+    if (hi < addr) {
+        return MEMTX_DECODE_ERROR;
+    }
+
+    if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
+        return MEMTX_DECODE_ERROR;
+    }
+
+    return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
+}
+
+static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
+                                  int size)
+{
+    hwaddr hi = addr + size - 1;
+    if (hi < addr) {
+        return MEMTX_DECODE_ERROR;
+    }
+
+    if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
+        return MEMTX_DECODE_ERROR;
+    }
+
+    return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
+}
+
+static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result);
+
+static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
+{
+    hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
+    hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
+
+    return utrd_addr;
+}
+
+static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
+{
+    uint32_t cmd_desc_base_addr_lo =
+        le32_to_cpu(utrd->command_desc_base_addr_lo);
+    uint32_t cmd_desc_base_addr_hi =
+        le32_to_cpu(utrd->command_desc_base_addr_hi);
+
+    return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
+}
+
+static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
+{
+    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
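+    /* response_upiu_offset is expressed in dwords; scale it to bytes */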
+    uint32_t rsp_upiu_byte_off =
+        le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
+    return req_upiu_base_addr + rsp_upiu_byte_off;
+}
+
+static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
+    MemTxResult ret;
+
+    ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
+    if (ret) {
+        trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
+    }
+    return ret;
+}
+
+static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
+    UtpUpiuReq *req_upiu = &req->req_upiu;
+    uint32_t copy_size;
+    uint16_t data_segment_length;
+    MemTxResult ret;
+
+    /*
+     * To know the size of the req_upiu, we need to read the
+     * data_segment_length in the header first.
+     */
+    ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
+                        sizeof(UtpUpiuHeader));
+    if (ret) {
+        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
+        return ret;
+    }
+    data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);
+
+    copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
+                data_segment_length;
+
+    ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
+    if (ret) {
+        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
+    }
+    return ret;
+}
+
+static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
+    uint16_t prdt_byte_off =
+        le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
+    uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
+    g_autofree UfshcdSgEntry *prd_entries = NULL;
+    hwaddr req_upiu_base_addr, prdt_base_addr;
+    int err;
+
+    assert(!req->sg);
+
+    if (prdt_size == 0) {
+        return MEMTX_OK;
+    }
+    /* prdt_size is a byte count; allocate prdt_len entries, not prdt_size */
+    prd_entries = g_new(UfshcdSgEntry, prdt_len);
+
+    req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
+    prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
+
+    err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
+    if (err) {
+        trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
+        return err;
+    }
+
+    req->sg = g_malloc0(sizeof(QEMUSGList));
+    pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
+
+    for (uint16_t i = 0; i < prdt_len; ++i) {
+        hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
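+        /* the PRDT data-byte-count field stores (number of bytes - 1) */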
+        uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
+        qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
+    }
+    return MEMTX_OK;
+}
+
+static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
+{
+    MemTxResult ret;
+
+    ret = ufs_dma_read_utrd(req);
+    if (ret) {
+        return ret;
+    }
+
+    ret = ufs_dma_read_req_upiu(req);
+    if (ret) {
+        return ret;
+    }
+
+    ret = ufs_dma_read_prdt(req);
+    if (ret) {
+        return ret;
+    }
+
+    return MEMTX_OK;
+}
+
+static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
+    MemTxResult ret;
+
+    ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
+    if (ret) {
+        trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
+    }
+    return ret;
+}
+
+static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
+    uint32_t rsp_upiu_byte_len =
+        le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
+    uint16_t data_segment_length =
+        be16_to_cpu(req->rsp_upiu.header.data_segment_length);
+    uint32_t copy_size = sizeof(UtpUpiuHeader) +
+                         UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
+                         data_segment_length;
+    MemTxResult ret;
+
+    if (copy_size > rsp_upiu_byte_len) {
+        copy_size = rsp_upiu_byte_len;
+    }
+
+    ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
+    if (ret) {
+        trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
+    }
+    return ret;
+}
+
+static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
+{
+    MemTxResult ret;
+
+    ret = ufs_dma_write_rsp_upiu(req);
+    if (ret) {
+        return ret;
+    }
+
+    return ufs_dma_write_utrd(req);
+}
+
+static void ufs_irq_check(UfsHc *u)
+{
+    PCIDevice *pci = PCI_DEVICE(u);
+
+    if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
+        trace_ufs_irq_raise();
+        pci_irq_assert(pci);
+    } else {
+        trace_ufs_irq_lower();
+        pci_irq_deassert(pci);
+    }
+}
+
+static void ufs_process_db(UfsHc *u, uint32_t val)
+{
+    unsigned long doorbell;
+    uint32_t slot;
+    uint32_t nutrs = u->params.nutrs;
+    UfsRequest *req;
+
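+    /* ignore doorbell bits that are already pending in UTRLDBR */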
+    val &= ~u->reg.utrldbr;
+    if (!val) {
+        return;
+    }
+
+    doorbell = val;
+    slot = find_first_bit(&doorbell, nutrs);
+
+    while (slot < nutrs) {
+        req = &u->req_list[slot];
+        if (req->state == UFS_REQUEST_ERROR) {
+            trace_ufs_err_utrl_slot_error(req->slot);
+            return;
+        }
+
+        if (req->state != UFS_REQUEST_IDLE) {
+            trace_ufs_err_utrl_slot_busy(req->slot);
+            return;
+        }
+
+        trace_ufs_process_db(slot);
+        req->state = UFS_REQUEST_READY;
+        slot = find_next_bit(&doorbell, nutrs, slot + 1);
+    }
+
+    qemu_bh_schedule(u->doorbell_bh);
+}
+
+static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
+{
+    trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
+                             u->reg.ucmdarg3);
+    /*
+     * Only the uic commands essential for running the Linux and Windows
+     * drivers are implemented.
+     */
+    switch (val) {
+    case UFS_UIC_CMD_DME_LINK_STARTUP:
+        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
+        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
+        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
+        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
+        break;
+    /* TODO: Revisit it when Power Management is implemented */
+    case UFS_UIC_CMD_DME_HIBER_ENTER:
+        u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
+        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
+        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
+        break;
+    case UFS_UIC_CMD_DME_HIBER_EXIT:
+        u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
+        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
+        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
+        break;
+    default:
+        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
+    }
+
+    u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);
+
+    ufs_irq_check(u);
+}
+
+static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
+{
+    switch (offset) {
+    case A_IS:
+        u->reg.is &= ~data;
+        ufs_irq_check(u);
+        break;
+    case A_IE:
+        u->reg.ie = data;
+        ufs_irq_check(u);
+        break;
+    case A_HCE:
+        if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
+            u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
+            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
+        } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
+                   !FIELD_EX32(data, HCE, HCE)) {
+            u->reg.hcs = 0;
+            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
+        }
+        break;
+    case A_UTRLBA:
+        u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
+        break;
+    case A_UTRLBAU:
+        u->reg.utrlbau = data;
+        break;
+    case A_UTRLDBR:
+        ufs_process_db(u, data);
+        u->reg.utrldbr |= data;
+        break;
+    case A_UTRLRSR:
+        u->reg.utrlrsr = data;
+        break;
+    case A_UTRLCNR:
+        u->reg.utrlcnr &= ~data;
+        break;
+    case A_UTMRLBA:
+        u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
+        break;
+    case A_UTMRLBAU:
+        u->reg.utmrlbau = data;
+        break;
+    case A_UICCMD:
+        ufs_process_uiccmd(u, data);
+        break;
+    case A_UCMDARG1:
+        u->reg.ucmdarg1 = data;
+        break;
+    case A_UCMDARG2:
+        u->reg.ucmdarg2 = data;
+        break;
+    case A_UCMDARG3:
+        u->reg.ucmdarg3 = data;
+        break;
+    case A_UTRLCLR:
+    case A_UTMRLDBR:
+    case A_UTMRLCLR:
+    case A_UTMRLRSR:
+        trace_ufs_err_unsupport_register_offset(offset);
+        break;
+    default:
+        trace_ufs_err_invalid_register_offset(offset);
+        break;
+    }
+}
+
+static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+    UfsHc *u = (UfsHc *)opaque;
+    uint8_t *ptr = (uint8_t *)&u->reg;
+    uint64_t value;
+
+    if (addr > sizeof(u->reg) - size) {
+        trace_ufs_err_invalid_register_offset(addr);
+        return 0;
+    }
+
+    value = *(uint32_t *)(ptr + addr);
+    trace_ufs_mmio_read(addr, value, size);
+    return value;
+}
+
+static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
+                           unsigned size)
+{
+    UfsHc *u = (UfsHc *)opaque;
+
+    if (addr > sizeof(u->reg) - size) {
+        trace_ufs_err_invalid_register_offset(addr);
+        return;
+    }
+
+    trace_ufs_mmio_write(addr, data, size);
+    ufs_write_reg(u, addr, data, size);
+}
+
+static const MemoryRegionOps ufs_mmio_ops = {
+    .read = ufs_mmio_read,
+    .write = ufs_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static QEMUSGList *ufs_get_sg_list(SCSIRequest *scsi_req)
+{
+    UfsRequest *req = scsi_req->hba_private;
+    return req->sg;
+}
+
+static void ufs_build_upiu_sense_data(UfsRequest *req, SCSIRequest *scsi_req)
+{
+    req->rsp_upiu.sr.sense_data_len = cpu_to_be16(scsi_req->sense_len);
+    assert(scsi_req->sense_len <= SCSI_SENSE_LEN);
+    memcpy(req->rsp_upiu.sr.sense_data, scsi_req->sense, scsi_req->sense_len);
+}
+
+static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type,
+                                  uint8_t flags, uint8_t response,
+                                  uint8_t scsi_status,
+                                  uint16_t data_segment_length)
+{
+    memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
+    req->rsp_upiu.header.trans_type = trans_type;
+    req->rsp_upiu.header.flags = flags;
+    req->rsp_upiu.header.response = response;
+    req->rsp_upiu.header.scsi_status = scsi_status;
+    req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
+}
+
+static void ufs_scsi_command_complete(SCSIRequest *scsi_req, size_t resid)
+{
+    UfsRequest *req = scsi_req->hba_private;
+    int16_t status = scsi_req->status;
+    uint32_t expected_len = be32_to_cpu(req->req_upiu.sc.exp_data_transfer_len);
+    uint32_t transferred_len = scsi_req->cmd.xfer - resid;
+    uint8_t flags = 0, response = UFS_COMMAND_RESULT_SUCESS;
+    uint16_t data_segment_length;
+
+    if (expected_len > transferred_len) {
+        req->rsp_upiu.sr.residual_transfer_count =
+            cpu_to_be32(expected_len - transferred_len);
+        flags |= UFS_UPIU_FLAG_UNDERFLOW;
+    } else if (expected_len < transferred_len) {
+        req->rsp_upiu.sr.residual_transfer_count =
+            cpu_to_be32(transferred_len - expected_len);
+        flags |= UFS_UPIU_FLAG_OVERFLOW;
+    }
+
+    if (status != 0) {
+        ufs_build_upiu_sense_data(req, scsi_req);
+        response = UFS_COMMAND_RESULT_FAIL;
+    }
+
+    /* ufs_build_upiu_header() does the cpu_to_be16() conversion itself */
+    data_segment_length = scsi_req->sense_len +
+                          sizeof(req->rsp_upiu.sr.sense_data_len);
+    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_RESPONSE, flags, response,
+                          status, data_segment_length);
+
+    ufs_complete_req(req, UFS_REQUEST_SUCCESS);
+
+    scsi_req->hba_private = NULL;
+    scsi_req_unref(scsi_req);
+}
+
+static const struct SCSIBusInfo ufs_scsi_info = {
+    .tcq = true,
+    .max_target = 0,
+    .max_lun = UFS_MAX_LUS,
+    .max_channel = 0,
+
+    .get_sg_list = ufs_get_sg_list,
+    .complete = ufs_scsi_command_complete,
+};
+
+static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    uint8_t lun = req->req_upiu.header.lun;
+    uint8_t task_tag = req->req_upiu.header.task_tag;
+    SCSIDevice *dev = NULL;
+
+    trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);
+
+    if (!is_wlun(lun)) {
+        if (lun >= u->device_desc.number_lu) {
+            trace_ufs_err_scsi_cmd_invalid_lun(lun);
+            return UFS_REQUEST_FAIL;
+        } else if (u->lus[lun] == NULL) {
+            trace_ufs_err_scsi_cmd_invalid_lun(lun);
+            return UFS_REQUEST_FAIL;
+        }
+    }
+
+    switch (lun) {
+    case UFS_UPIU_REPORT_LUNS_WLUN:
+        dev = &u->report_wlu->qdev;
+        break;
+    case UFS_UPIU_UFS_DEVICE_WLUN:
+        dev = &u->dev_wlu->qdev;
+        break;
+    case UFS_UPIU_BOOT_WLUN:
+        dev = &u->boot_wlu->qdev;
+        break;
+    case UFS_UPIU_RPMB_WLUN:
+        dev = &u->rpmb_wlu->qdev;
+        break;
+    default:
+        dev = &u->lus[lun]->qdev;
+    }
+
+    SCSIRequest *scsi_req = scsi_req_new(
+        dev, task_tag, lun, req->req_upiu.sc.cdb, UFS_CDB_SIZE, req);
+
+    uint32_t len = scsi_req_enqueue(scsi_req);
+    if (len) {
+        scsi_req_continue(scsi_req);
+    }
+
+    return UFS_REQUEST_NO_COMPLETE;
+}
+
+static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
+{
+    trace_ufs_exec_nop_cmd(req->slot);
+    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
+    return UFS_REQUEST_SUCCESS;
+}
+
+/*
+ * This table defines the permission of each flag based on its IDN. Some
+ * flags are declared read-only here even though the UFS spec allows writing
+ * them, so that an error is returned for features that are not yet supported.
+ */
+static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
+    [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
+    /* Write protection is not supported */
+    [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
+    [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
+    [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
+                                    UFS_QUERY_FLAG_CLEAR |
+                                    UFS_QUERY_FLAG_TOGGLE,
+    [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
+        UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
+        UFS_QUERY_FLAG_TOGGLE,
+    /* Purge Operation is not supported */
+    [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
+    /* Refresh Operation is not supported */
+    [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
+    /* Physical Resource Removal is not supported */
+    [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
+    [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
+    [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
+    /* Write Booster is not supported */
+    [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
+    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
+    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
+};
+
+static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
+{
+    if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
+        return UFS_QUERY_RESULT_INVALID_IDN;
+    }
+
+    if (!(flag_permission[idn] & op)) {
+        if (op == UFS_QUERY_FLAG_READ) {
+            trace_ufs_err_query_flag_not_readable(idn);
+            return UFS_QUERY_RESULT_NOT_READABLE;
+        }
+        trace_ufs_err_query_flag_not_writable(idn);
+        return UFS_QUERY_RESULT_NOT_WRITEABLE;
+    }
+
+    return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
+    /* booting is not supported */
+    [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
+        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+    [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
+    /* refresh operation is not supported */
+    [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
+    [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
+};
+
+static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
+{
+    if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
+        return UFS_QUERY_RESULT_INVALID_IDN;
+    }
+
+    if (!(attr_permission[idn] & op)) {
+        if (op == UFS_QUERY_ATTR_READ) {
+            trace_ufs_err_query_attr_not_readable(idn);
+            return UFS_QUERY_RESULT_NOT_READABLE;
+        }
+        trace_ufs_err_query_attr_not_writable(idn);
+        return UFS_QUERY_RESULT_NOT_WRITEABLE;
+    }
+
+    return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
+{
+    UfsHc *u = req->hc;
+    uint8_t idn = req->req_upiu.qr.idn;
+    uint32_t value;
+    QueryRespCode ret;
+
+    ret = ufs_flag_check_idn_valid(idn, op);
+    if (ret) {
+        return ret;
+    }
+
+    if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
+        value = 0;
+    } else if (op == UFS_QUERY_FLAG_READ) {
+        value = *(((uint8_t *)&u->flags) + idn);
+    } else if (op == UFS_QUERY_FLAG_SET) {
+        value = 1;
+    } else if (op == UFS_QUERY_FLAG_CLEAR) {
+        value = 0;
+    } else if (op == UFS_QUERY_FLAG_TOGGLE) {
+        value = *(((uint8_t *)&u->flags) + idn);
+        value = !value;
+    } else {
+        trace_ufs_err_query_invalid_opcode(op);
+        return UFS_QUERY_RESULT_INVALID_OPCODE;
+    }
+
+    *(((uint8_t *)&u->flags) + idn) = value;
+    req->rsp_upiu.qr.value = cpu_to_be32(value);
+    return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
+{
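+    /*
+     * Multi-byte attribute values are stored big-endian in u->attributes;
+     * convert them to host byte order before returning them to the caller.
+     */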
+    switch (idn) {
+    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
+        return u->attributes.boot_lun_en;
+    case UFS_QUERY_ATTR_IDN_POWER_MODE:
+        return u->attributes.current_power_mode;
+    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+        return u->attributes.active_icc_level;
+    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
+        return u->attributes.out_of_order_data_en;
+    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
+        return u->attributes.background_op_status;
+    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
+        return u->attributes.purge_status;
+    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
+        return u->attributes.max_data_in_size;
+    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
+        return u->attributes.max_data_out_size;
+    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+        return be32_to_cpu(u->attributes.dyn_cap_needed);
+    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
+        return u->attributes.ref_clk_freq;
+    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
+        return u->attributes.config_descr_lock;
+    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+        return u->attributes.max_num_of_rtt;
+    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
+        return be16_to_cpu(u->attributes.exception_event_control);
+    case UFS_QUERY_ATTR_IDN_EE_STATUS:
+        return be16_to_cpu(u->attributes.exception_event_status);
+    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
+        return be32_to_cpu(u->attributes.seconds_passed);
+    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
+        return be16_to_cpu(u->attributes.context_conf);
+    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
+        return u->attributes.device_ffu_status;
+    case UFS_QUERY_ATTR_IDN_PSA_STATE:
+        return be32_to_cpu(u->attributes.psa_state);
+    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
+        return be32_to_cpu(u->attributes.psa_data_size);
+    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
+        return u->attributes.ref_clk_gating_wait_time;
+    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
+        return u->attributes.device_case_rough_temperaure;
+    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
+        return u->attributes.device_too_high_temp_boundary;
+    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
+        return u->attributes.device_too_low_temp_boundary;
+    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
+        return u->attributes.throttling_status;
+    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
+        return u->attributes.wb_buffer_flush_status;
+    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
+        return u->attributes.available_wb_buffer_size;
+    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
+        return u->attributes.wb_buffer_life_time_est;
+    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
+        return be32_to_cpu(u->attributes.current_wb_buffer_size);
+    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
+        return u->attributes.refresh_status;
+    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
+        return u->attributes.refresh_freq;
+    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
+        return u->attributes.refresh_unit;
+    }
+    return 0;
+}
+
+static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
+{
+    switch (idn) {
+    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+        u->attributes.active_icc_level = value;
+        break;
+    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
+        u->attributes.max_data_in_size = value;
+        break;
+    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
+        u->attributes.max_data_out_size = value;
+        break;
+    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
+        u->attributes.ref_clk_freq = value;
+        break;
+    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+        u->attributes.max_num_of_rtt = value;
+        break;
+    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
+        u->attributes.exception_event_control = cpu_to_be16(value);
+        break;
+    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
+        u->attributes.seconds_passed = cpu_to_be32(value);
+        break;
+    case UFS_QUERY_ATTR_IDN_PSA_STATE:
+        u->attributes.psa_state = value;
+        break;
+    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
+        u->attributes.psa_data_size = cpu_to_be32(value);
+        break;
+    }
+}
+
+static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
+{
+    UfsHc *u = req->hc;
+    uint8_t idn = req->req_upiu.qr.idn;
+    uint32_t value;
+    QueryRespCode ret;
+
+    ret = ufs_attr_check_idn_valid(idn, op);
+    if (ret) {
+        return ret;
+    }
+
+    if (op == UFS_QUERY_ATTR_READ) {
+        value = ufs_read_attr_value(u, idn);
+    } else {
+        value = be32_to_cpu(req->req_upiu.qr.value);
+        ufs_write_attr_value(u, idn, value);
+    }
+
+    req->rsp_upiu.qr.value = cpu_to_be32(value);
+    return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static const RpmbUnitDescriptor rpmb_unit_desc = {
+    .length = sizeof(RpmbUnitDescriptor),
+    .descriptor_idn = 2,
+    .unit_index = UFS_UPIU_RPMB_WLUN,
+    .lu_enable = 0,
+};
+
+static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    uint8_t lun = req->req_upiu.qr.index;
+
+    if (lun != UFS_UPIU_RPMB_WLUN &&
+        (lun > UFS_MAX_LUS || u->lus[lun] == NULL)) {
+        trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
+        return UFS_QUERY_RESULT_INVALID_INDEX;
+    }
+
+    if (lun == UFS_UPIU_RPMB_WLUN) {
+        memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
+    } else {
+        memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
+               sizeof(u->lus[lun]->unit_desc));
+    }
+
+    return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static inline StringDescriptor manufacturer_str_desc(void)
+{
+    StringDescriptor desc = {
+        .length = 0x12,
+        .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+    };
+    desc.UC[0] = cpu_to_be16('R');
+    desc.UC[1] = cpu_to_be16('E');
+    desc.UC[2] = cpu_to_be16('D');
+    desc.UC[3] = cpu_to_be16('H');
+    desc.UC[4] = cpu_to_be16('A');
+    desc.UC[5] = cpu_to_be16('T');
+    return desc;
+}
+
+static inline StringDescriptor product_name_str_desc(void)
+{
+    StringDescriptor desc = {
+        .length = 0x22,
+        .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+    };
+    desc.UC[0] = cpu_to_be16('Q');
+    desc.UC[1] = cpu_to_be16('E');
+    desc.UC[2] = cpu_to_be16('M');
+    desc.UC[3] = cpu_to_be16('U');
+    desc.UC[4] = cpu_to_be16(' ');
+    desc.UC[5] = cpu_to_be16('U');
+    desc.UC[6] = cpu_to_be16('F');
+    desc.UC[7] = cpu_to_be16('S');
+    return desc;
+}
+
+static inline StringDescriptor product_rev_level_str_desc(void)
+{
+    StringDescriptor desc = {
+        .length = 0x0a,
+        .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+    };
+    desc.UC[0] = cpu_to_be16('0');
+    desc.UC[1] = cpu_to_be16('0');
+    desc.UC[2] = cpu_to_be16('0');
+    desc.UC[3] = cpu_to_be16('1');
+    return desc;
+}
+
+static const StringDescriptor null_str_desc = {
+    .length = 0x02,
+    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+};
+
+static QueryRespCode ufs_read_string_desc(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    uint8_t index = req->req_upiu.qr.index;
+    StringDescriptor desc;
+
+    if (index == u->device_desc.manufacturer_name) {
+        desc = manufacturer_str_desc();
+        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
+    } else if (index == u->device_desc.product_name) {
+        desc = product_name_str_desc();
+        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
+    } else if (index == u->device_desc.serial_number) {
+        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
+    } else if (index == u->device_desc.oem_id) {
+        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
+    } else if (index == u->device_desc.product_revision_level) {
+        desc = product_rev_level_str_desc();
+        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
+    } else {
+        trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
+        return UFS_QUERY_RESULT_INVALID_INDEX;
+    }
+    return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static inline InterconnectDescriptor interconnect_desc(void)
+{
+    InterconnectDescriptor desc = {
+        .length = sizeof(InterconnectDescriptor),
+        .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
+    };
+    desc.bcd_unipro_version = cpu_to_be16(0x180);
+    desc.bcd_mphy_version = cpu_to_be16(0x410);
+    return desc;
+}
+
+static QueryRespCode ufs_read_desc(UfsRequest *req)
+{
+    UfsHc *u = req->hc;
+    QueryRespCode status;
+    uint8_t idn = req->req_upiu.qr.idn;
+    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
+    InterconnectDescriptor desc;
+
+    switch (idn) {
+    case UFS_QUERY_DESC_IDN_DEVICE:
+        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    case UFS_QUERY_DESC_IDN_UNIT:
+        status = ufs_read_unit_desc(req);
+        break;
+    case UFS_QUERY_DESC_IDN_GEOMETRY:
+        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
+               sizeof(u->geometry_desc));
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
+        desc = interconnect_desc();
+        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    }
+    case UFS_QUERY_DESC_IDN_STRING:
+        status = ufs_read_string_desc(req);
+        break;
+    case UFS_QUERY_DESC_IDN_POWER:
+        /*
+         * The power descriptor is not emulated; return a zeroed stub with
+         * only the length and descriptor IDN fields set.
+         */
+        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
+        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
+        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    case UFS_QUERY_DESC_IDN_HEALTH:
+        /*
+         * The health descriptor is not emulated; return a zeroed stub with
+         * only the length and descriptor IDN fields set.
+         */
+        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
+        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
+        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    default:
+        length = 0;
+        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
+        status = UFS_QUERY_RESULT_INVALID_IDN;
+    }
+
+    if (length > req->rsp_upiu.qr.data[0]) {
+        length = req->rsp_upiu.qr.data[0];
+    }
+    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
+    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
+    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
+    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
+    req->rsp_upiu.qr.length = cpu_to_be16(length);
+
+    return status;
+}
+
+static QueryRespCode ufs_exec_query_read(UfsRequest *req)
+{
+    QueryRespCode status;
+    switch (req->req_upiu.qr.opcode) {
+    case UFS_UPIU_QUERY_OPCODE_NOP:
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    case UFS_UPIU_QUERY_OPCODE_READ_DESC:
+        status = ufs_read_desc(req);
+        break;
+    case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
+        status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
+        break;
+    case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
+        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
+        break;
+    default:
+        trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
+        status = UFS_QUERY_RESULT_INVALID_OPCODE;
+        break;
+    }
+
+    return status;
+}
+
+static QueryRespCode ufs_exec_query_write(UfsRequest *req)
+{
+    QueryRespCode status;
+    switch (req->req_upiu.qr.opcode) {
+    case UFS_UPIU_QUERY_OPCODE_NOP:
+        status = UFS_QUERY_RESULT_SUCCESS;
+        break;
+    case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
+        /* write descriptor is not supported */
+        status = UFS_QUERY_RESULT_NOT_WRITEABLE;
+        break;
+    case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
+        status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
+        break;
+    case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
+        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
+        break;
+    case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
+        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
+        break;
+    case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
+        break;
+    default:
+        trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
+        status = UFS_QUERY_RESULT_INVALID_OPCODE;
+        break;
+    }
+
+    return status;
+}
+
+static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
+{
+    uint8_t query_func = req->req_upiu.header.query_func;
+    uint16_t data_segment_length;
+    QueryRespCode status;
+
+    trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
+    if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
+        status = ufs_exec_query_read(req);
+    } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
+        status = ufs_exec_query_write(req);
+    } else {
+        status = UFS_QUERY_RESULT_GENERAL_FAILURE;
+    }
+
+    data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
+    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
+                          data_segment_length);
+
+    if (status != UFS_QUERY_RESULT_SUCCESS) {
+        return UFS_REQUEST_FAIL;
+    }
+    return UFS_REQUEST_SUCCESS;
+}
+
+static void ufs_exec_req(UfsRequest *req)
+{
+    UfsReqResult req_result;
+
+    if (ufs_dma_read_upiu(req)) {
+        return;
+    }
+
+    switch (req->req_upiu.header.trans_type) {
+    case UFS_UPIU_TRANSACTION_NOP_OUT:
+        req_result = ufs_exec_nop_cmd(req);
+        break;
+    case UFS_UPIU_TRANSACTION_COMMAND:
+        req_result = ufs_exec_scsi_cmd(req);
+        break;
+    case UFS_UPIU_TRANSACTION_QUERY_REQ:
+        req_result = ufs_exec_query_cmd(req);
+        break;
+    default:
+        trace_ufs_err_invalid_trans_code(req->slot,
+                                         req->req_upiu.header.trans_type);
+        req_result = UFS_REQUEST_FAIL;
+    }
+
+    /*
+     * For SCSI commands, ufs_complete_req() is invoked from the
+     * ufs_scsi_command_complete() callback, so it must not be called
+     * again here to avoid completing the request twice.
+     */
+    if (req_result != UFS_REQUEST_NO_COMPLETE) {
+        ufs_complete_req(req, req_result);
+    }
+}
+
+static void ufs_process_req(void *opaque)
+{
+    UfsHc *u = opaque;
+    UfsRequest *req;
+    int slot;
+
+    for (slot = 0; slot < u->params.nutrs; slot++) {
+        req = &u->req_list[slot];
+
+        if (req->state != UFS_REQUEST_READY) {
+            continue;
+        }
+        trace_ufs_process_req(slot);
+        req->state = UFS_REQUEST_RUNNING;
+
+        ufs_exec_req(req);
+    }
+}
+
+static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
+{
+    UfsHc *u = req->hc;
+    assert(req->state == UFS_REQUEST_RUNNING);
+
+    if (req_result == UFS_REQUEST_SUCCESS) {
+        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
+    } else {
+        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
+    }
+
+    trace_ufs_complete_req(req->slot);
+    req->state = UFS_REQUEST_COMPLETE;
+    qemu_bh_schedule(u->complete_bh);
+}
+
+static void ufs_clear_req(UfsRequest *req)
+{
+    if (req->sg != NULL) {
+        qemu_sglist_destroy(req->sg);
+        g_free(req->sg);
+        req->sg = NULL;
+    }
+
+    memset(&req->utrd, 0, sizeof(req->utrd));
+    memset(&req->req_upiu, 0, sizeof(req->req_upiu));
+    memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
+}
+
+static void ufs_sendback_req(void *opaque)
+{
+    UfsHc *u = opaque;
+    UfsRequest *req;
+    int slot;
+
+    for (slot = 0; slot < u->params.nutrs; slot++) {
+        req = &u->req_list[slot];
+
+        if (req->state != UFS_REQUEST_COMPLETE) {
+            continue;
+        }
+
+        if (ufs_dma_write_upiu(req)) {
+            req->state = UFS_REQUEST_ERROR;
+            continue;
+        }
+
+        /*
+         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
+         * supported
+         */
+        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
+            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
+            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
+        }
+
+        u->reg.utrldbr &= ~(1 << slot);
+        u->reg.utrlcnr |= (1 << slot);
+
+        trace_ufs_sendback_req(req->slot);
+
+        ufs_clear_req(req);
+        req->state = UFS_REQUEST_IDLE;
+    }
+
+    ufs_irq_check(u);
+}
+
+static bool ufs_check_constraints(UfsHc *u, Error **errp)
+{
+    if (u->params.nutrs > UFS_MAX_NUTRS) {
+        error_setg(errp, "nutrs must be less than or equal to %d",
+                   UFS_MAX_NUTRS);
+        return false;
+    }
+
+    if (u->params.nutmrs > UFS_MAX_NUTMRS) {
+        error_setg(errp, "nutmrs must be less than or equal to %d",
+                   UFS_MAX_NUTMRS);
+        return false;
+    }
+
+    return true;
+}
+
+static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
+{
+    uint8_t *pci_conf = pci_dev->config;
+
+    pci_conf[PCI_INTERRUPT_PIN] = 1;
+    pci_config_set_prog_interface(pci_conf, 0x1);
+
+    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
+                          u->reg_size);
+    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
+    u->irq = pci_allocate_irq(pci_dev);
+}
+
+static void ufs_init_state(UfsHc *u)
+{
+    u->req_list = g_new0(UfsRequest, u->params.nutrs);
+
+    for (int i = 0; i < u->params.nutrs; i++) {
+        u->req_list[i].hc = u;
+        u->req_list[i].slot = i;
+        u->req_list[i].sg = NULL;
+        u->req_list[i].state = UFS_REQUEST_IDLE;
+    }
+
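+    /*
+     * Two bottom halves split the request lifecycle: doorbell_bh runs
+     * requests marked READY, while complete_bh DMA-writes the responses of
+     * requests marked COMPLETE and raises the interrupt when needed.
+     */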
+    u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
+                                         &DEVICE(u)->mem_reentrancy_guard);
+    u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
+                                         &DEVICE(u)->mem_reentrancy_guard);
+}
+
+static void ufs_init_hc(UfsHc *u)
+{
+    uint32_t cap = 0;
+
+    u->reg_size = pow2ceil(sizeof(UfsReg));
+
+    memset(&u->reg, 0, sizeof(u->reg));
+    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
+    cap = FIELD_DP32(cap, CAP, RTT, 2);
+    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
+    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
+    cap = FIELD_DP32(cap, CAP, 64AS, 1);
+    cap = FIELD_DP32(cap, CAP, OODDS, 0);
+    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
+    cap = FIELD_DP32(cap, CAP, CS, 0);
+    u->reg.cap = cap;
+    u->reg.ver = UFS_SPEC_VER;
+
+    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
+    u->device_desc.length = sizeof(DeviceDescriptor);
+    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
+    u->device_desc.device_sub_class = 0x01;
+    u->device_desc.number_lu = 0x00;
+    u->device_desc.number_wlu = 0x04;
+    /* TODO: Revisit it when Power Management is implemented */
+    u->device_desc.init_power_mode = 0x01; /* Active Mode */
+    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
+    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
+    u->device_desc.manufacturer_name = 0x00;
+    u->device_desc.product_name = 0x01;
+    u->device_desc.serial_number = 0x02;
+    u->device_desc.oem_id = 0x03;
+    u->device_desc.ud_0_base_offset = 0x16;
+    u->device_desc.ud_config_p_length = 0x1A;
+    u->device_desc.device_rtt_cap = 0x02;
+    u->device_desc.queue_depth = u->params.nutrs;
+    u->device_desc.product_revision_level = 0x04;
+
+    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
+    u->geometry_desc.length = sizeof(GeometryDescriptor);
+    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
+    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
+    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
+    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
+    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
+    u->geometry_desc.max_in_buffer_size = 0x8;
+    u->geometry_desc.max_out_buffer_size = 0x8;
+    u->geometry_desc.rpmb_read_write_size = 0x40;
+    u->geometry_desc.data_ordering =
+        0x0; /* out-of-order data transfer is not supported */
+    u->geometry_desc.max_context_id_number = 0x5;
+    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);
+
+    memset(&u->attributes, 0, sizeof(u->attributes));
+    u->attributes.max_data_in_size = 0x08;
+    u->attributes.max_data_out_size = 0x08;
+    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
+    /* configure descriptor is not supported */
+    u->attributes.config_descr_lock = 0x01;
+    u->attributes.max_num_of_rtt = 0x02;
+
+    memset(&u->flags, 0, sizeof(u->flags));
+    u->flags.permanently_disable_fw_update = 1;
+}
+
+static bool ufs_init_wlu(UfsHc *u, UfsWLu **wlu, uint8_t wlun, Error **errp)
+{
+    UfsWLu *new_wlu = UFSWLU(qdev_new(TYPE_UFS_WLU));
+
+    qdev_prop_set_uint32(DEVICE(new_wlu), "lun", wlun);
+
+    /*
+     * Well-known lus share the same bus as the normal lus. If a well-known
+     * lu used the same channel value as the normal lus, REPORT LUNS would
+     * report the well-known lus along with the normal lus. To prevent this,
+     * the channel value of normal lus is fixed to 0 and the channel value
+     * of well-known lus is fixed to 1.
+     */
+    qdev_prop_set_uint32(DEVICE(new_wlu), "channel", 1);
+    if (!qdev_realize_and_unref(DEVICE(new_wlu), BUS(&u->bus), errp)) {
+        return false;
+    }
+
+    *wlu = new_wlu;
+    return true;
+}
+
+static void ufs_realize(PCIDevice *pci_dev, Error **errp)
+{
+    UfsHc *u = UFS(pci_dev);
+
+    if (!ufs_check_constraints(u, errp)) {
+        return;
+    }
+
+    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
+              u->parent_obj.qdev.id);
+    u->bus.parent_bus.info = &ufs_scsi_info;
+
+    ufs_init_state(u);
+    ufs_init_hc(u);
+    ufs_init_pci(u, pci_dev);
+
+    if (!ufs_init_wlu(u, &u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN, errp)) {
+        return;
+    }
+
+    if (!ufs_init_wlu(u, &u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN, errp)) {
+        return;
+    }
+
+    if (!ufs_init_wlu(u, &u->boot_wlu, UFS_UPIU_BOOT_WLUN, errp)) {
+        return;
+    }
+
+    if (!ufs_init_wlu(u, &u->rpmb_wlu, UFS_UPIU_RPMB_WLUN, errp)) {
+        return;
+    }
+}
+
+static void ufs_exit(PCIDevice *pci_dev)
+{
+    UfsHc *u = UFS(pci_dev);
+
+    if (u->dev_wlu) {
+        object_unref(OBJECT(u->dev_wlu));
+        u->dev_wlu = NULL;
+    }
+
+    if (u->report_wlu) {
+        object_unref(OBJECT(u->report_wlu));
+        u->report_wlu = NULL;
+    }
+
+    if (u->rpmb_wlu) {
+        object_unref(OBJECT(u->rpmb_wlu));
+        u->rpmb_wlu = NULL;
+    }
+
+    if (u->boot_wlu) {
+        object_unref(OBJECT(u->boot_wlu));
+        u->boot_wlu = NULL;
+    }
+
+    qemu_bh_delete(u->doorbell_bh);
+    qemu_bh_delete(u->complete_bh);
+
+    for (int i = 0; i < u->params.nutrs; i++) {
+        ufs_clear_req(&u->req_list[i]);
+    }
+    g_free(u->req_list);
+}
+
+static Property ufs_props[] = {
+    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
+    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
+    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription ufs_vmstate = {
+    .name = "ufs",
+    .unmigratable = 1,
+};
+
+static void ufs_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
+
+    pc->realize = ufs_realize;
+    pc->exit = ufs_exit;
+    pc->vendor_id = PCI_VENDOR_ID_REDHAT;
+    pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
+    pc->class_id = PCI_CLASS_STORAGE_UFS;
+
+    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+    dc->desc = "Universal Flash Storage";
+    device_class_set_props(dc, ufs_props);
+    dc->vmsd = &ufs_vmstate;
+}
+
+static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
+                                  Error **errp)
+{
+    SCSIDevice *dev = SCSI_DEVICE(qdev);
+    UfsBusClass *ubc = UFS_BUS_GET_CLASS(qbus);
+    UfsHc *u = UFS(qbus->parent);
+
+    if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_WLU) == 0) {
+        if (dev->lun != UFS_UPIU_REPORT_LUNS_WLUN &&
+            dev->lun != UFS_UPIU_UFS_DEVICE_WLUN &&
+            dev->lun != UFS_UPIU_BOOT_WLUN && dev->lun != UFS_UPIU_RPMB_WLUN) {
+            error_setg(errp, "bad well-known lun: %d", dev->lun);
+            return false;
+        }
+
+        if ((dev->lun == UFS_UPIU_REPORT_LUNS_WLUN && u->report_wlu != NULL) ||
+            (dev->lun == UFS_UPIU_UFS_DEVICE_WLUN && u->dev_wlu != NULL) ||
+            (dev->lun == UFS_UPIU_BOOT_WLUN && u->boot_wlu != NULL) ||
+            (dev->lun == UFS_UPIU_RPMB_WLUN && u->rpmb_wlu != NULL)) {
+            error_setg(errp, "well-known lun %d already exists", dev->lun);
+            return false;
+        }
+
+        return true;
+    }
+
+    if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_LU) != 0) {
+        error_setg(errp, "%s cannot be connected to ufs-bus",
+                   object_get_typename(OBJECT(dev)));
+        return false;
+    }
+
+    return ubc->parent_check_address(qbus, qdev, errp);
+}
+
+static void ufs_bus_class_init(ObjectClass *class, void *data)
+{
+    BusClass *bc = BUS_CLASS(class);
+    UfsBusClass *ubc = UFS_BUS_CLASS(class);
+    ubc->parent_check_address = bc->check_address;
+    bc->check_address = ufs_bus_check_address;
+}
+
+static const TypeInfo ufs_info = {
+    .name = TYPE_UFS,
+    .parent = TYPE_PCI_DEVICE,
+    .class_init = ufs_class_init,
+    .instance_size = sizeof(UfsHc),
+    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
+};
+
+static const TypeInfo ufs_bus_info = {
+    .name = TYPE_UFS_BUS,
+    .parent = TYPE_SCSI_BUS,
+    .class_init = ufs_bus_class_init,
+    .class_size = sizeof(UfsBusClass),
+    .instance_size = sizeof(UfsBus),
+};
+
+static void ufs_register_types(void)
+{
+    type_register_static(&ufs_info);
+    type_register_static(&ufs_bus_info);
+}
+
+type_init(ufs_register_types)
diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h
new file mode 100644 (file)
index 0000000..f244228
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * QEMU UFS
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Written by Jeuk Kim <jeuk20.kim@samsung.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_UFS_UFS_H
+#define HW_UFS_UFS_H
+
+#include "hw/pci/pci_device.h"
+#include "hw/scsi/scsi.h"
+#include "block/ufs.h"
+
+#define UFS_MAX_LUS 32
+#define UFS_BLOCK_SIZE 4096
+
+typedef struct UfsBusClass {
+    BusClass parent_class;
+    bool (*parent_check_address)(BusState *bus, DeviceState *dev, Error **errp);
+} UfsBusClass;
+
+typedef struct UfsBus {
+    SCSIBus parent_bus;
+} UfsBus;
+
+#define TYPE_UFS_BUS "ufs-bus"
+DECLARE_OBJ_CHECKERS(UfsBus, UfsBusClass, UFS_BUS, TYPE_UFS_BUS)
+
+typedef enum UfsRequestState {
+    UFS_REQUEST_IDLE = 0,
+    UFS_REQUEST_READY = 1,
+    UFS_REQUEST_RUNNING = 2,
+    UFS_REQUEST_COMPLETE = 3,
+    UFS_REQUEST_ERROR = 4,
+} UfsRequestState;
+
+typedef enum UfsReqResult {
+    UFS_REQUEST_SUCCESS = 0,
+    UFS_REQUEST_FAIL = 1,
+    UFS_REQUEST_NO_COMPLETE = 2,
+} UfsReqResult;
+
+typedef struct UfsRequest {
+    struct UfsHc *hc;
+    UfsRequestState state;
+    int slot;
+
+    UtpTransferReqDesc utrd;
+    UtpUpiuReq req_upiu;
+    UtpUpiuRsp rsp_upiu;
+
+    /* for scsi command */
+    QEMUSGList *sg;
+} UfsRequest;
+
+typedef struct UfsLu {
+    SCSIDevice qdev;
+    uint8_t lun;
+    UnitDescriptor unit_desc;
+} UfsLu;
+
+typedef struct UfsWLu {
+    SCSIDevice qdev;
+    uint8_t lun;
+} UfsWLu;
+
+typedef struct UfsParams {
+    char *serial;
+    uint8_t nutrs; /* Number of UTP Transfer Request Slots */
+    uint8_t nutmrs; /* Number of UTP Task Management Request Slots */
+} UfsParams;
+
+typedef struct UfsHc {
+    PCIDevice parent_obj;
+    UfsBus bus;
+    MemoryRegion iomem;
+    UfsReg reg;
+    UfsParams params;
+    uint32_t reg_size;
+    UfsRequest *req_list;
+
+    UfsLu *lus[UFS_MAX_LUS];
+    UfsWLu *report_wlu;
+    UfsWLu *dev_wlu;
+    UfsWLu *boot_wlu;
+    UfsWLu *rpmb_wlu;
+    DeviceDescriptor device_desc;
+    GeometryDescriptor geometry_desc;
+    Attributes attributes;
+    Flags flags;
+
+    qemu_irq irq;
+    QEMUBH *doorbell_bh;
+    QEMUBH *complete_bh;
+} UfsHc;
+
+#define TYPE_UFS "ufs"
+#define UFS(obj) OBJECT_CHECK(UfsHc, (obj), TYPE_UFS)
+
+#define TYPE_UFS_LU "ufs-lu"
+#define UFSLU(obj) OBJECT_CHECK(UfsLu, (obj), TYPE_UFS_LU)
+
+#define TYPE_UFS_WLU "ufs-wlu"
+#define UFSWLU(obj) OBJECT_CHECK(UfsWLu, (obj), TYPE_UFS_WLU)
+
+typedef enum UfsQueryFlagPerm {
+    UFS_QUERY_FLAG_NONE = 0x0,
+    UFS_QUERY_FLAG_READ = 0x1,
+    UFS_QUERY_FLAG_SET = 0x2,
+    UFS_QUERY_FLAG_CLEAR = 0x4,
+    UFS_QUERY_FLAG_TOGGLE = 0x8,
+} UfsQueryFlagPerm;
+
+typedef enum UfsQueryAttrPerm {
+    UFS_QUERY_ATTR_NONE = 0x0,
+    UFS_QUERY_ATTR_READ = 0x1,
+    UFS_QUERY_ATTR_WRITE = 0x2,
+} UfsQueryAttrPerm;
+
+static inline bool is_wlun(uint8_t lun)
+{
+    return (lun == UFS_UPIU_REPORT_LUNS_WLUN ||
+            lun == UFS_UPIU_UFS_DEVICE_WLUN || lun == UFS_UPIU_BOOT_WLUN ||
+            lun == UFS_UPIU_RPMB_WLUN);
+}
+
+#endif /* HW_UFS_UFS_H */
diff --git a/include/block/nbd.h b/include/block/nbd.h
index 4428bcffbb9fd840a5fc39da2714ff0549cc1107..f672b76173bafd24e35e3e9863b930bdd7f14cec 100644 (file)
@@ -324,8 +324,7 @@ typedef struct NBDExportInfo {
     char **contexts;
 } NBDExportInfo;
 
-int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
-                          QCryptoTLSCreds *tlscreds,
+int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                           const char *hostname, QIOChannel **outioc,
                           NBDExportInfo *info, Error **errp);
 void nbd_free_export_list(NBDExportInfo *info, int count);
diff --git a/include/block/ufs.h b/include/block/ufs.h
new file mode 100644 (file)
index 0000000..fd884eb
--- /dev/null
@@ -0,0 +1,1090 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef BLOCK_UFS_H
+#define BLOCK_UFS_H
+
+#include "hw/registerfields.h"
+
+typedef struct QEMU_PACKED UfsReg {
+    uint32_t cap;
+    uint32_t rsvd0;
+    uint32_t ver;
+    uint32_t rsvd1;
+    uint32_t hcpid;
+    uint32_t hcmid;
+    uint32_t ahit;
+    uint32_t rsvd2;
+    uint32_t is;
+    uint32_t ie;
+    uint32_t rsvd3[2];
+    uint32_t hcs;
+    uint32_t hce;
+    uint32_t uecpa;
+    uint32_t uecdl;
+    uint32_t uecn;
+    uint32_t uect;
+    uint32_t uecdme;
+    uint32_t utriacr;
+    uint32_t utrlba;
+    uint32_t utrlbau;
+    uint32_t utrldbr;
+    uint32_t utrlclr;
+    uint32_t utrlrsr;
+    uint32_t utrlcnr;
+    uint32_t rsvd4[2];
+    uint32_t utmrlba;
+    uint32_t utmrlbau;
+    uint32_t utmrldbr;
+    uint32_t utmrlclr;
+    uint32_t utmrlrsr;
+    uint32_t rsvd5[3];
+    uint32_t uiccmd;
+    uint32_t ucmdarg1;
+    uint32_t ucmdarg2;
+    uint32_t ucmdarg3;
+    uint32_t rsvd6[4];
+    uint32_t rsvd7[4];
+    uint32_t rsvd8[16];
+    uint32_t ccap;
+} UfsReg;
+
+REG32(CAP, offsetof(UfsReg, cap))
+    FIELD(CAP, NUTRS, 0, 5)
+    FIELD(CAP, RTT, 8, 8)
+    FIELD(CAP, NUTMRS, 16, 3)
+    FIELD(CAP, AUTOH8, 23, 1)
+    FIELD(CAP, 64AS, 24, 1)
+    FIELD(CAP, OODDS, 25, 1)
+    FIELD(CAP, UICDMETMS, 26, 1)
+    FIELD(CAP, CS, 28, 1)
+REG32(VER, offsetof(UfsReg, ver))
+REG32(HCPID, offsetof(UfsReg, hcpid))
+REG32(HCMID, offsetof(UfsReg, hcmid))
+REG32(AHIT, offsetof(UfsReg, ahit))
+REG32(IS, offsetof(UfsReg, is))
+    FIELD(IS, UTRCS, 0, 1)
+    FIELD(IS, UDEPRI, 1, 1)
+    FIELD(IS, UE, 2, 1)
+    FIELD(IS, UTMS, 3, 1)
+    FIELD(IS, UPMS, 4, 1)
+    FIELD(IS, UHXS, 5, 1)
+    FIELD(IS, UHES, 6, 1)
+    FIELD(IS, ULLS, 7, 1)
+    FIELD(IS, ULSS, 8, 1)
+    FIELD(IS, UTMRCS, 9, 1)
+    FIELD(IS, UCCS, 10, 1)
+    FIELD(IS, DFES, 11, 1)
+    FIELD(IS, UTPES, 12, 1)
+    FIELD(IS, HCFES, 16, 1)
+    FIELD(IS, SBFES, 17, 1)
+    FIELD(IS, CEFES, 18, 1)
+REG32(IE, offsetof(UfsReg, ie))
+    FIELD(IE, UTRCE, 0, 1)
+    FIELD(IE, UDEPRIE, 1, 1)
+    FIELD(IE, UEE, 2, 1)
+    FIELD(IE, UTMSE, 3, 1)
+    FIELD(IE, UPMSE, 4, 1)
+    FIELD(IE, UHXSE, 5, 1)
+    FIELD(IE, UHESE, 6, 1)
+    FIELD(IE, ULLSE, 7, 1)
+    FIELD(IE, ULSSE, 8, 1)
+    FIELD(IE, UTMRCE, 9, 1)
+    FIELD(IE, UCCE, 10, 1)
+    FIELD(IE, DFEE, 11, 1)
+    FIELD(IE, UTPEE, 12, 1)
+    FIELD(IE, HCFEE, 16, 1)
+    FIELD(IE, SBFEE, 17, 1)
+    FIELD(IE, CEFEE, 18, 1)
+REG32(HCS, offsetof(UfsReg, hcs))
+    FIELD(HCS, DP, 0, 1)
+    FIELD(HCS, UTRLRDY, 1, 1)
+    FIELD(HCS, UTMRLRDY, 2, 1)
+    FIELD(HCS, UCRDY, 3, 1)
+    FIELD(HCS, UPMCRS, 8, 3)
+REG32(HCE, offsetof(UfsReg, hce))
+    FIELD(HCE, HCE, 0, 1)
+    FIELD(HCE, CGE, 1, 1)
+REG32(UECPA, offsetof(UfsReg, uecpa))
+REG32(UECDL, offsetof(UfsReg, uecdl))
+REG32(UECN, offsetof(UfsReg, uecn))
+REG32(UECT, offsetof(UfsReg, uect))
+REG32(UECDME, offsetof(UfsReg, uecdme))
+REG32(UTRIACR, offsetof(UfsReg, utriacr))
+REG32(UTRLBA, offsetof(UfsReg, utrlba))
+    FIELD(UTRLBA, UTRLBA, 9, 22)
+REG32(UTRLBAU, offsetof(UfsReg, utrlbau))
+REG32(UTRLDBR, offsetof(UfsReg, utrldbr))
+REG32(UTRLCLR, offsetof(UfsReg, utrlclr))
+REG32(UTRLRSR, offsetof(UfsReg, utrlrsr))
+REG32(UTRLCNR, offsetof(UfsReg, utrlcnr))
+REG32(UTMRLBA, offsetof(UfsReg, utmrlba))
+    FIELD(UTMRLBA, UTMRLBA, 9, 22)
+REG32(UTMRLBAU, offsetof(UfsReg, utmrlbau))
+REG32(UTMRLDBR, offsetof(UfsReg, utmrldbr))
+REG32(UTMRLCLR, offsetof(UfsReg, utmrlclr))
+REG32(UTMRLRSR, offsetof(UfsReg, utmrlrsr))
+REG32(UICCMD, offsetof(UfsReg, uiccmd))
+REG32(UCMDARG1, offsetof(UfsReg, ucmdarg1))
+REG32(UCMDARG2, offsetof(UfsReg, ucmdarg2))
+REG32(UCMDARG3, offsetof(UfsReg, ucmdarg3))
+REG32(CCAP, offsetof(UfsReg, ccap))
+
+#define UFS_INTR_MASK                                    \
+    ((1 << R_IS_CEFES_SHIFT) | (1 << R_IS_SBFES_SHIFT) | \
+     (1 << R_IS_HCFES_SHIFT) | (1 << R_IS_UTPES_SHIFT) | \
+     (1 << R_IS_DFES_SHIFT) | (1 << R_IS_UCCS_SHIFT) |   \
+     (1 << R_IS_UTMRCS_SHIFT) | (1 << R_IS_ULSS_SHIFT) | \
+     (1 << R_IS_ULLS_SHIFT) | (1 << R_IS_UHES_SHIFT) |   \
+     (1 << R_IS_UHXS_SHIFT) | (1 << R_IS_UPMS_SHIFT) |   \
+     (1 << R_IS_UTMS_SHIFT) | (1 << R_IS_UE_SHIFT) |     \
+     (1 << R_IS_UDEPRI_SHIFT) | (1 << R_IS_UTRCS_SHIFT))
+
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT 24
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK 0xff
+#define UFS_UPIU_HEADER_TRANSACTION_TYPE(dword0)                       \
+    ((be32_to_cpu(dword0) >> UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT) & \
+     UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK)
+
+#define UFS_UPIU_HEADER_QUERY_FUNC_SHIFT 16
+#define UFS_UPIU_HEADER_QUERY_FUNC_MASK 0xff
+#define UFS_UPIU_HEADER_QUERY_FUNC(dword1)                       \
+    ((be32_to_cpu(dword1) >> UFS_UPIU_HEADER_QUERY_FUNC_SHIFT) & \
+     UFS_UPIU_HEADER_QUERY_FUNC_MASK)
+
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT 0
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK 0xffff
+#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH(dword2)                       \
+    ((be32_to_cpu(dword2) >> UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT) & \
+     UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK)
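+/*
+ * Example: applying UFS_UPIU_HEADER_TRANSACTION_TYPE() to the first
+ * big-endian header dword converts it to host order and extracts bits
+ * 31:24, the UPIU transaction code (e.g. UFS_UPIU_TRANSACTION_COMMAND).
+ */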
+
+typedef struct QEMU_PACKED DeviceDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint8_t device;
+    uint8_t device_class;
+    uint8_t device_sub_class;
+    uint8_t protocol;
+    uint8_t number_lu;
+    uint8_t number_wlu;
+    uint8_t boot_enable;
+    uint8_t descr_access_en;
+    uint8_t init_power_mode;
+    uint8_t high_priority_lun;
+    uint8_t secure_removal_type;
+    uint8_t security_lu;
+    uint8_t background_ops_term_lat;
+    uint8_t init_active_icc_level;
+    uint16_t spec_version;
+    uint16_t manufacture_date;
+    uint8_t manufacturer_name;
+    uint8_t product_name;
+    uint8_t serial_number;
+    uint8_t oem_id;
+    uint16_t manufacturer_id;
+    uint8_t ud_0_base_offset;
+    uint8_t ud_config_p_length;
+    uint8_t device_rtt_cap;
+    uint16_t periodic_rtc_update;
+    uint8_t ufs_features_support;
+    uint8_t ffu_timeout;
+    uint8_t queue_depth;
+    uint16_t device_version;
+    uint8_t num_secure_wp_area;
+    uint32_t psa_max_data_size;
+    uint8_t psa_state_timeout;
+    uint8_t product_revision_level;
+    uint8_t reserved[36];
+    uint32_t extended_ufs_features_support;
+    uint8_t write_booster_buffer_preserve_user_space_en;
+    uint8_t write_booster_buffer_type;
+    uint32_t num_shared_write_booster_buffer_alloc_units;
+} DeviceDescriptor;
+
+typedef struct QEMU_PACKED GeometryDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint8_t media_technology;
+    uint8_t reserved;
+    uint64_t total_raw_device_capacity;
+    uint8_t max_number_lu;
+    uint32_t segment_size;
+    uint8_t allocation_unit_size;
+    uint8_t min_addr_block_size;
+    uint8_t optimal_read_block_size;
+    uint8_t optimal_write_block_size;
+    uint8_t max_in_buffer_size;
+    uint8_t max_out_buffer_size;
+    uint8_t rpmb_read_write_size;
+    uint8_t dynamic_capacity_resource_policy;
+    uint8_t data_ordering;
+    uint8_t max_context_id_number;
+    uint8_t sys_data_tag_unit_size;
+    uint8_t sys_data_tag_res_size;
+    uint8_t supported_sec_r_types;
+    uint16_t supported_memory_types;
+    uint32_t system_code_max_n_alloc_u;
+    uint16_t system_code_cap_adj_fac;
+    uint32_t non_persist_max_n_alloc_u;
+    uint16_t non_persist_cap_adj_fac;
+    uint32_t enhanced_1_max_n_alloc_u;
+    uint16_t enhanced_1_cap_adj_fac;
+    uint32_t enhanced_2_max_n_alloc_u;
+    uint16_t enhanced_2_cap_adj_fac;
+    uint32_t enhanced_3_max_n_alloc_u;
+    uint16_t enhanced_3_cap_adj_fac;
+    uint32_t enhanced_4_max_n_alloc_u;
+    uint16_t enhanced_4_cap_adj_fac;
+    uint32_t optimal_logical_block_size;
+    uint8_t reserved2[7];
+    uint32_t write_booster_buffer_max_n_alloc_units;
+    uint8_t device_max_write_booster_l_us;
+    uint8_t write_booster_buffer_cap_adj_fac;
+    uint8_t supported_write_booster_buffer_user_space_reduction_types;
+    uint8_t supported_write_booster_buffer_types;
+} GeometryDescriptor;
+
+#define UFS_GEOMETRY_CAPACITY_SHIFT 9
+
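+/*
+ * Sketch: total_raw_device_capacity above is expressed in 512-byte units,
+ * hence the shift of 9. Given a host-endian copy of the field, the size in
+ * bytes would be:
+ *
+ *     uint64_t bytes = capacity << UFS_GEOMETRY_CAPACITY_SHIFT;
+ */
+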
+typedef struct QEMU_PACKED UnitDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint8_t unit_index;
+    uint8_t lu_enable;
+    uint8_t boot_lun_id;
+    uint8_t lu_write_protect;
+    uint8_t lu_queue_depth;
+    uint8_t psa_sensitive;
+    uint8_t memory_type;
+    uint8_t data_reliability;
+    uint8_t logical_block_size;
+    uint64_t logical_block_count;
+    uint32_t erase_block_size;
+    uint8_t provisioning_type;
+    uint64_t phy_mem_resource_count;
+    uint16_t context_capabilities;
+    uint8_t large_unit_granularity_m1;
+    uint8_t reserved[6];
+    uint32_t lu_num_write_booster_buffer_alloc_units;
+} UnitDescriptor;
+
+typedef struct QEMU_PACKED RpmbUnitDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint8_t unit_index;
+    uint8_t lu_enable;
+    uint8_t boot_lun_id;
+    uint8_t lu_write_protect;
+    uint8_t lu_queue_depth;
+    uint8_t psa_sensitive;
+    uint8_t memory_type;
+    uint8_t reserved;
+    uint8_t logical_block_size;
+    uint64_t logical_block_count;
+    uint32_t erase_block_size;
+    uint8_t provisioning_type;
+    uint64_t phy_mem_resource_count;
+    uint8_t reserved2[3];
+} RpmbUnitDescriptor;
+
+typedef struct QEMU_PACKED PowerParametersDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint16_t active_icc_levels_vcc[16];
+    uint16_t active_icc_levels_vccq[16];
+    uint16_t active_icc_levels_vccq_2[16];
+} PowerParametersDescriptor;
+
+typedef struct QEMU_PACKED InterconnectDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint16_t bcd_unipro_version;
+    uint16_t bcd_mphy_version;
+} InterconnectDescriptor;
+
+typedef struct QEMU_PACKED StringDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint16_t UC[126];
+} StringDescriptor;
+
+typedef struct QEMU_PACKED DeviceHealthDescriptor {
+    uint8_t length;
+    uint8_t descriptor_idn;
+    uint8_t pre_eol_info;
+    uint8_t device_life_time_est_a;
+    uint8_t device_life_time_est_b;
+    uint8_t vendor_prop_info[32];
+    uint32_t refresh_total_count;
+    uint32_t refresh_progress;
+} DeviceHealthDescriptor;
+
+typedef struct QEMU_PACKED Flags {
+    uint8_t reserved;
+    uint8_t device_init;
+    uint8_t permanent_wp_en;
+    uint8_t power_on_wp_en;
+    uint8_t background_ops_en;
+    uint8_t device_life_span_mode_en;
+    uint8_t purge_enable;
+    uint8_t refresh_enable;
+    uint8_t phy_resource_removal;
+    uint8_t busy_rtc;
+    uint8_t reserved2;
+    uint8_t permanently_disable_fw_update;
+    uint8_t reserved3[2];
+    uint8_t wb_en;
+    uint8_t wb_buffer_flush_en;
+    uint8_t wb_buffer_flush_during_hibernate;
+    uint8_t reserved4[2];
+} Flags;
+
+typedef struct Attributes {
+    uint8_t boot_lun_en;
+    uint8_t reserved;
+    uint8_t current_power_mode;
+    uint8_t active_icc_level;
+    uint8_t out_of_order_data_en;
+    uint8_t background_op_status;
+    uint8_t purge_status;
+    uint8_t max_data_in_size;
+    uint8_t max_data_out_size;
+    uint32_t dyn_cap_needed;
+    uint8_t ref_clk_freq;
+    uint8_t config_descr_lock;
+    uint8_t max_num_of_rtt;
+    uint16_t exception_event_control;
+    uint16_t exception_event_status;
+    uint32_t seconds_passed;
+    uint16_t context_conf;
+    uint8_t device_ffu_status;
+    uint8_t psa_state;
+    uint32_t psa_data_size;
+    uint8_t ref_clk_gating_wait_time;
+    uint8_t device_case_rough_temperaure;
+    uint8_t device_too_high_temp_boundary;
+    uint8_t device_too_low_temp_boundary;
+    uint8_t throttling_status;
+    uint8_t wb_buffer_flush_status;
+    uint8_t available_wb_buffer_size;
+    uint8_t wb_buffer_life_time_est;
+    uint32_t current_wb_buffer_size;
+    uint8_t refresh_status;
+    uint8_t refresh_freq;
+    uint8_t refresh_unit;
+    uint8_t refresh_method;
+} Attributes;
+
+#define UFS_TRANSACTION_SPECIFIC_FIELD_SIZE 20
+#define UFS_MAX_QUERY_DATA_SIZE 256
+
+/* Command response result code */
+typedef enum CommandRespCode {
+    UFS_COMMAND_RESULT_SUCESS = 0x00,
+    UFS_COMMAND_RESULT_FAIL = 0x01,
+} CommandRespCode;
+
+enum {
+    UFS_UPIU_FLAG_UNDERFLOW = 0x20,
+    UFS_UPIU_FLAG_OVERFLOW = 0x40,
+};
+
+typedef struct QEMU_PACKED UtpUpiuHeader {
+    uint8_t trans_type;
+    uint8_t flags;
+    uint8_t lun;
+    uint8_t task_tag;
+    uint8_t iid_cmd_set_type;
+    uint8_t query_func;
+    uint8_t response;
+    uint8_t scsi_status;
+    uint8_t ehs_len;
+    uint8_t device_inf;
+    uint16_t data_segment_length;
+} UtpUpiuHeader;
+
+/*
+ * The code below is copied from the Linux kernel
+ * ("include/uapi/scsi/scsi_bsg_ufs.h") and modified to fit the QEMU style.
+ */
+
+typedef struct QEMU_PACKED UtpUpiuQuery {
+    uint8_t opcode;
+    uint8_t idn;
+    uint8_t index;
+    uint8_t selector;
+    uint16_t reserved_osf;
+    uint16_t length;
+    uint32_t value;
+    uint32_t reserved[2];
+    /* EHS length should be 0. We don't have to worry about EHS area. */
+    uint8_t data[UFS_MAX_QUERY_DATA_SIZE];
+} UtpUpiuQuery;
+
+#define UFS_CDB_SIZE 16
+
+/*
+ * struct UtpUpiuCmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuCmd {
+    uint32_t exp_data_transfer_len;
+    uint8_t cdb[UFS_CDB_SIZE];
+} UtpUpiuCmd;
+
+/*
+ * struct UtpUpiuReq - general UPIU request structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for SCSI command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuReq {
+    UtpUpiuHeader header;
+    union {
+        UtpUpiuCmd sc;
+        UtpUpiuQuery qr;
+    };
+} UtpUpiuReq;
+
+/*
+ * The code below is copied from the Linux kernel ("include/ufs/ufshci.h") and
+ * modified to fit the QEMU style.
+ */
+
+enum {
+    UFS_PWR_OK = 0x0,
+    UFS_PWR_LOCAL = 0x01,
+    UFS_PWR_REMOTE = 0x02,
+    UFS_PWR_BUSY = 0x03,
+    UFS_PWR_ERROR_CAP = 0x04,
+    UFS_PWR_FATAL_ERROR = 0x05,
+};
+
+/* UIC Commands */
+enum uic_cmd_dme {
+    UFS_UIC_CMD_DME_GET = 0x01,
+    UFS_UIC_CMD_DME_SET = 0x02,
+    UFS_UIC_CMD_DME_PEER_GET = 0x03,
+    UFS_UIC_CMD_DME_PEER_SET = 0x04,
+    UFS_UIC_CMD_DME_POWERON = 0x10,
+    UFS_UIC_CMD_DME_POWEROFF = 0x11,
+    UFS_UIC_CMD_DME_ENABLE = 0x12,
+    UFS_UIC_CMD_DME_RESET = 0x14,
+    UFS_UIC_CMD_DME_END_PT_RST = 0x15,
+    UFS_UIC_CMD_DME_LINK_STARTUP = 0x16,
+    UFS_UIC_CMD_DME_HIBER_ENTER = 0x17,
+    UFS_UIC_CMD_DME_HIBER_EXIT = 0x18,
+    UFS_UIC_CMD_DME_TEST_MODE = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+    UFS_UIC_CMD_RESULT_SUCCESS = 0x00,
+    UFS_UIC_CMD_RESULT_INVALID_ATTR = 0x01,
+    UFS_UIC_CMD_RESULT_FAILURE = 0x01,
+    UFS_UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
+    UFS_UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
+    UFS_UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
+    UFS_UIC_CMD_RESULT_BAD_INDEX = 0x05,
+    UFS_UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
+    UFS_UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
+    UFS_UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
+    UFS_UIC_CMD_RESULT_BUSY = 0x09,
+    UFS_UIC_CMD_RESULT_DME_FAILURE = 0x0A,
+};
+
+#define UFS_MASK_UIC_COMMAND_RESULT 0xFF
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+    UFS_UTP_CMD_TYPE_SCSI = 0x0,
+    UFS_UTP_CMD_TYPE_UFS = 0x1,
+    UFS_UTP_CMD_TYPE_DEV_MANAGE = 0x2,
+};
+
+/* To accommodate the Command type required by UFS 2.0 */
+enum {
+    UFS_UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
+enum {
+    UFS_UTP_SCSI_COMMAND = 0x00000000,
+    UFS_UTP_NATIVE_UFS_COMMAND = 0x10000000,
+    UFS_UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
+    UFS_UTP_REQ_DESC_INT_CMD = 0x01000000,
+    UFS_UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+    UFS_UTP_NO_DATA_TRANSFER = 0x00000000,
+    UFS_UTP_HOST_TO_DEVICE = 0x02000000,
+    UFS_UTP_DEVICE_TO_HOST = 0x04000000,
+};
+
+/* Overall command status values */
+enum UtpOcsCodes {
+    UFS_OCS_SUCCESS = 0x0,
+    UFS_OCS_INVALID_CMD_TABLE_ATTR = 0x1,
+    UFS_OCS_INVALID_PRDT_ATTR = 0x2,
+    UFS_OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
+    UFS_OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
+    UFS_OCS_PEER_COMM_FAILURE = 0x5,
+    UFS_OCS_ABORTED = 0x6,
+    UFS_OCS_FATAL_ERROR = 0x7,
+    UFS_OCS_DEVICE_FATAL_ERROR = 0x8,
+    UFS_OCS_INVALID_CRYPTO_CONFIG = 0x9,
+    UFS_OCS_GENERAL_CRYPTO_ERROR = 0xa,
+    UFS_OCS_INVALID_COMMAND_STATUS = 0xf,
+};
+
+enum {
+    UFS_MASK_OCS = 0x0F,
+};
+
+/*
+ * struct UfshcdSgEntry - UFSHCI PRD Entry
+ * @addr: Physical address; DW-0 and DW-1.
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+typedef struct QEMU_PACKED UfshcdSgEntry {
+    uint64_t addr;
+    uint32_t reserved;
+    uint32_t size;
+    /*
+     * followed by variant-specific fields if
+     * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
+     */
+} UfshcdSgEntry;
+
+/*
+ * struct RequestDescHeader - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+typedef struct QEMU_PACKED RequestDescHeader {
+    uint32_t dword_0;
+    uint32_t dword_1;
+    uint32_t dword_2;
+    uint32_t dword_3;
+} RequestDescHeader;
+
+/*
+ * struct UtpTransferReqDesc - UTP Transfer Request Descriptor (UTRD)
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+typedef struct QEMU_PACKED UtpTransferReqDesc {
+    /* DW 0-3 */
+    RequestDescHeader header;
+
+    /* DW 4-5 */
+    uint32_t command_desc_base_addr_lo;
+    uint32_t command_desc_base_addr_hi;
+
+    /* DW 6 */
+    uint16_t response_upiu_length;
+    uint16_t response_upiu_offset;
+
+    /* DW 7 */
+    uint16_t prd_table_length;
+    uint16_t prd_table_offset;
+} UtpTransferReqDesc;
+
+/*
+ * UTMRD structure.
+ */
+typedef struct QEMU_PACKED UtpTaskReqDesc {
+    /* DW 0-3 */
+    RequestDescHeader header;
+
+    /* DW 4-11 - Task request UPIU structure */
+    struct {
+        UtpUpiuHeader req_header;
+        uint32_t input_param1;
+        uint32_t input_param2;
+        uint32_t input_param3;
+        uint32_t reserved1[2];
+    } upiu_req;
+
+    /* DW 12-19 - Task Management Response UPIU structure */
+    struct {
+        UtpUpiuHeader rsp_header;
+        uint32_t output_param1;
+        uint32_t output_param2;
+        uint32_t reserved2[3];
+    } upiu_rsp;
+} UtpTaskReqDesc;
+
+/*
+ * The code below is copied from the Linux kernel ("include/ufs/ufs.h") and
+ * modified to fit the QEMU style.
+ */
+
+#define UFS_GENERAL_UPIU_REQUEST_SIZE (sizeof(UtpUpiuReq))
+#define UFS_QUERY_DESC_MAX_SIZE 255
+#define UFS_QUERY_DESC_MIN_SIZE 2
+#define UFS_QUERY_DESC_HDR_SIZE 2
+#define UFS_QUERY_OSF_SIZE \
+    (UFS_GENERAL_UPIU_REQUEST_SIZE - (sizeof(UtpUpiuHeader)))
+#define UFS_SENSE_SIZE 18
+
+/*
+ * A UFS device may have standard LUs whose LUN ids range from 0x00 to
+ * 0x7F. Standard LUs use the "Peripheral Device Addressing Format".
+ * A UFS device may also have Well Known LUs (also referred to as W-LUs),
+ * whose ids again range from 0x00 to 0x7F. For W-LUs, the device only
+ * uses the "Extended Addressing Format", which means the W-LUNs run
+ * from 0xc100 (SCSI_W_LUN_BASE) onwards.
+ * This means the maximum LUN number reported by a UFS device could be
+ * 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
+#define UFS_UPIU_WLUN_ID (1 << 7)
+
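+/*
+ * Sketch: bit 7 of the UPIU lun field distinguishes W-LUs from standard
+ * LUs, so a hypothetical decode could be:
+ *
+ *     bool is_wlun = (lun & UFS_UPIU_WLUN_ID) != 0;
+ *     uint8_t unit = lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+ */
+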
+/* The WriteBooster buffer is available only for logical units 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
+/*
+ * The WriteBooster buffer lifetime has a limit set by the vendor.
+ * If it exceeds that limit, the WriteBooster feature will be disabled.
+ */
+#define UFS_WB_EXCEED_LIFETIME 0x0B
+
+/*
+ * In the UFS spec, the Extra Header Segment (EHS) starts at byte 32 of the
+ * UPIU request/response packet.
+ */
+#define UFS_EHS_OFFSET_IN_RESPONSE 32
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+    UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
+    UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
+    UFS_UPIU_BOOT_WLUN = 0xB0,
+    UFS_UPIU_RPMB_WLUN = 0xC4,
+};
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+    UFS_ABORT_TASK = 0x01,
+    UFS_ABORT_TASK_SET = 0x02,
+    UFS_CLEAR_TASK_SET = 0x04,
+    UFS_LOGICAL_RESET = 0x08,
+    UFS_QUERY_TASK = 0x80,
+    UFS_QUERY_TASK_SET = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+    UFS_UPIU_TRANSACTION_NOP_OUT = 0x00,
+    UFS_UPIU_TRANSACTION_COMMAND = 0x01,
+    UFS_UPIU_TRANSACTION_DATA_OUT = 0x02,
+    UFS_UPIU_TRANSACTION_TASK_REQ = 0x04,
+    UFS_UPIU_TRANSACTION_QUERY_REQ = 0x16,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+    UFS_UPIU_TRANSACTION_NOP_IN = 0x20,
+    UFS_UPIU_TRANSACTION_RESPONSE = 0x21,
+    UFS_UPIU_TRANSACTION_DATA_IN = 0x22,
+    UFS_UPIU_TRANSACTION_TASK_RSP = 0x24,
+    UFS_UPIU_TRANSACTION_READY_XFER = 0x31,
+    UFS_UPIU_TRANSACTION_QUERY_RSP = 0x36,
+    UFS_UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
+};
+
+/* UPIU Read/Write flags */
+enum {
+    UFS_UPIU_CMD_FLAGS_NONE = 0x00,
+    UFS_UPIU_CMD_FLAGS_WRITE = 0x20,
+    UFS_UPIU_CMD_FLAGS_READ = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+    UFS_UPIU_TASK_ATTR_SIMPLE = 0x00,
+    UFS_UPIU_TASK_ATTR_ORDERED = 0x01,
+    UFS_UPIU_TASK_ATTR_HEADQ = 0x02,
+    UFS_UPIU_TASK_ATTR_ACA = 0x03,
+};
+
+/* UPIU Query request function */
+enum {
+    UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+    UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+    UFS_QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+    UFS_QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
+    UFS_QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
+    UFS_QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+    UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
+    UFS_QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
+    UFS_QUERY_FLAG_IDN_REFRESH_ENABLE = 0x07,
+    UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
+    UFS_QUERY_FLAG_IDN_BUSY_RTC = 0x09,
+    UFS_QUERY_FLAG_IDN_RESERVED3 = 0x0A,
+    UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+    UFS_QUERY_FLAG_IDN_WB_EN = 0x0E,
+    UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+    UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
+    UFS_QUERY_FLAG_IDN_HPB_RESET = 0x11,
+    UFS_QUERY_FLAG_IDN_HPB_EN = 0x12,
+    UFS_QUERY_FLAG_IDN_COUNT,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+    UFS_QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
+    UFS_QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
+    UFS_QUERY_ATTR_IDN_POWER_MODE = 0x02,
+    UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
+    UFS_QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
+    UFS_QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+    UFS_QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
+    UFS_QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
+    UFS_QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
+    UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
+    UFS_QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
+    UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
+    UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
+    UFS_QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+    UFS_QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+    UFS_QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
+    UFS_QUERY_ATTR_IDN_CNTX_CONF = 0x10,
+    UFS_QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
+    UFS_QUERY_ATTR_IDN_RESERVED2 = 0x12,
+    UFS_QUERY_ATTR_IDN_RESERVED3 = 0x13,
+    UFS_QUERY_ATTR_IDN_FFU_STATUS = 0x14,
+    UFS_QUERY_ATTR_IDN_PSA_STATE = 0x15,
+    UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+    UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+    UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+    UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+    UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
+    UFS_QUERY_ATTR_IDN_THROTTLING_STATUS = 0x1B,
+    UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+    UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+    UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+    UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
+    UFS_QUERY_ATTR_IDN_REFRESH_STATUS = 0x2C,
+    UFS_QUERY_ATTR_IDN_REFRESH_FREQ = 0x2D,
+    UFS_QUERY_ATTR_IDN_REFRESH_UNIT = 0x2E,
+    UFS_QUERY_ATTR_IDN_COUNT,
+};
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+    UFS_QUERY_DESC_IDN_DEVICE = 0x0,
+    UFS_QUERY_DESC_IDN_CONFIGURATION = 0x1,
+    UFS_QUERY_DESC_IDN_UNIT = 0x2,
+    UFS_QUERY_DESC_IDN_RFU_0 = 0x3,
+    UFS_QUERY_DESC_IDN_INTERCONNECT = 0x4,
+    UFS_QUERY_DESC_IDN_STRING = 0x5,
+    UFS_QUERY_DESC_IDN_RFU_1 = 0x6,
+    UFS_QUERY_DESC_IDN_GEOMETRY = 0x7,
+    UFS_QUERY_DESC_IDN_POWER = 0x8,
+    UFS_QUERY_DESC_IDN_HEALTH = 0x9,
+    UFS_QUERY_DESC_IDN_MAX,
+};
+
+enum desc_header_offset {
+    UFS_QUERY_DESC_LENGTH_OFFSET = 0x00,
+    UFS_QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+};
+
+/* Unit descriptor parameters offsets in bytes */
+enum unit_desc_param {
+    UFS_UNIT_DESC_PARAM_LEN = 0x0,
+    UFS_UNIT_DESC_PARAM_TYPE = 0x1,
+    UFS_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+    UFS_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+    UFS_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+    UFS_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+    UFS_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+    UFS_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+    UFS_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+    UFS_UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
+    UFS_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+    UFS_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+    UFS_UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
+    UFS_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+    UFS_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+    UFS_UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
+    UFS_UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+    UFS_UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
+    UFS_UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
+    UFS_UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
+    UFS_UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
+};
+
+/* RPMB Unit descriptor parameters offsets in bytes */
+enum rpmb_unit_desc_param {
+    UFS_RPMB_UNIT_DESC_PARAM_LEN = 0x0,
+    UFS_RPMB_UNIT_DESC_PARAM_TYPE = 0x1,
+    UFS_RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+    UFS_RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+    UFS_RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+    UFS_RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+    UFS_RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+    UFS_RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
+    UFS_RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+    UFS_RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9,
+    UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+    UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+    UFS_RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13,
+    UFS_RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14,
+    UFS_RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15,
+    UFS_RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16,
+    UFS_RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+    UFS_RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+};
+
+/* Device descriptor parameters offsets in bytes */
+enum device_desc_param {
+    UFS_DEVICE_DESC_PARAM_LEN = 0x0,
+    UFS_DEVICE_DESC_PARAM_TYPE = 0x1,
+    UFS_DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+    UFS_DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+    UFS_DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+    UFS_DEVICE_DESC_PARAM_PRTCL = 0x5,
+    UFS_DEVICE_DESC_PARAM_NUM_LU = 0x6,
+    UFS_DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+    UFS_DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+    UFS_DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+    UFS_DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+    UFS_DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+    UFS_DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+    UFS_DEVICE_DESC_PARAM_SEC_LU = 0xD,
+    UFS_DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+    UFS_DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+    UFS_DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+    UFS_DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+    UFS_DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+    UFS_DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+    UFS_DEVICE_DESC_PARAM_SN = 0x16,
+    UFS_DEVICE_DESC_PARAM_OEM_ID = 0x17,
+    UFS_DEVICE_DESC_PARAM_MANF_ID = 0x18,
+    UFS_DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+    UFS_DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+    UFS_DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+    UFS_DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+    UFS_DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
+    UFS_DEVICE_DESC_PARAM_FFU_TMT = 0x20,
+    UFS_DEVICE_DESC_PARAM_Q_DPTH = 0x21,
+    UFS_DEVICE_DESC_PARAM_DEV_VER = 0x22,
+    UFS_DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
+    UFS_DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
+    UFS_DEVICE_DESC_PARAM_PSA_TMT = 0x29,
+    UFS_DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+    UFS_DEVICE_DESC_PARAM_HPB_VER = 0x40,
+    UFS_DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
+    UFS_DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+    UFS_DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+    UFS_DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+    UFS_DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
+};
+
+/* Interconnect descriptor parameters offsets in bytes */
+enum interconnect_desc_param {
+    UFS_INTERCONNECT_DESC_PARAM_LEN = 0x0,
+    UFS_INTERCONNECT_DESC_PARAM_TYPE = 0x1,
+    UFS_INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
+    UFS_INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
+};
+
+/* Geometry descriptor parameters offsets in bytes */
+enum geometry_desc_param {
+    UFS_GEOMETRY_DESC_PARAM_LEN = 0x0,
+    UFS_GEOMETRY_DESC_PARAM_TYPE = 0x1,
+    UFS_GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
+    UFS_GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
+    UFS_GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
+    UFS_GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
+    UFS_GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
+    UFS_GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
+    UFS_GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
+    UFS_GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
+    UFS_GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
+    UFS_GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
+    UFS_GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
+    UFS_GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
+    UFS_GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
+    UFS_GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
+    UFS_GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
+    UFS_GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
+    UFS_GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
+    UFS_GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
+    UFS_GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
+    UFS_GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
+    UFS_GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
+    UFS_GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
+    UFS_GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
+    UFS_GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
+    UFS_GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
+    UFS_GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
+    UFS_GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
+    UFS_GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
+    UFS_GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
+    UFS_GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+    UFS_GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
+    UFS_GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
+    UFS_GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
+    UFS_GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
+    UFS_GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+    UFS_GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+    UFS_GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+    UFS_GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+    UFS_GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
+};
+
+/* Health descriptor parameters offsets in bytes */
+enum health_desc_param {
+    UFS_HEALTH_DESC_PARAM_LEN = 0x0,
+    UFS_HEALTH_DESC_PARAM_TYPE = 0x1,
+    UFS_HEALTH_DESC_PARAM_EOL_INFO = 0x2,
+    UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
+    UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
+};
+
+/* WriteBooster buffer mode */
+enum {
+    UFS_WB_BUF_MODE_LU_DEDICATED = 0x0,
+    UFS_WB_BUF_MODE_SHARED = 0x1,
+};
+
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn =1
+ * 02h: LU permanently write protected when fPermanentWPEn =1
+ */
+enum ufs_lu_wp_type {
+    UFS_LU_NO_WP = 0x00,
+    UFS_LU_POWER_ON_WP = 0x01,
+    UFS_LU_PERM_WP = 0x02,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+    UFS_UPIU_QUERY_OPCODE_NOP = 0x0,
+    UFS_UPIU_QUERY_OPCODE_READ_DESC = 0x1,
+    UFS_UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
+    UFS_UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
+    UFS_UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
+    UFS_UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
+    UFS_UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
+    UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
+    UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
+};
+
+/* Query response result code */
+typedef enum QueryRespCode {
+    UFS_QUERY_RESULT_SUCCESS = 0x00,
+    UFS_QUERY_RESULT_NOT_READABLE = 0xF6,
+    UFS_QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+    UFS_QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+    UFS_QUERY_RESULT_INVALID_LENGTH = 0xF9,
+    UFS_QUERY_RESULT_INVALID_VALUE = 0xFA,
+    UFS_QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+    UFS_QUERY_RESULT_INVALID_INDEX = 0xFC,
+    UFS_QUERY_RESULT_INVALID_IDN = 0xFD,
+    UFS_QUERY_RESULT_INVALID_OPCODE = 0xFE,
+    UFS_QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+} QueryRespCode;
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+    UFS_UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
+    UFS_UPIU_COMMAND_SET_TYPE_UFS = 0x1,
+    UFS_UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
+};
+
+/* Task management service response */
+enum {
+    UFS_UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
+    UFS_UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+    UFS_UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
+    UFS_UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
+    UFS_UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
+};
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+    UFS_ACTIVE_PWR_MODE = 1,
+    UFS_SLEEP_PWR_MODE = 2,
+    UFS_POWERDOWN_PWR_MODE = 3,
+    UFS_DEEPSLEEP_PWR_MODE = 4,
+};
+
+/*
+ * struct UtpCmdRsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+typedef struct QEMU_PACKED UtpCmdRsp {
+    uint32_t residual_transfer_count;
+    uint32_t reserved[4];
+    uint16_t sense_data_len;
+    uint8_t sense_data[UFS_SENSE_SIZE];
+} UtpCmdRsp;
+
+/*
+ * struct UtpUpiuRsp - general UPIU response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for SCSI command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+typedef struct QEMU_PACKED UtpUpiuRsp {
+    UtpUpiuHeader header;
+    union {
+        UtpCmdRsp sr;
+        UtpUpiuQuery qr;
+    };
+} UtpUpiuRsp;
+
+static inline void _ufs_check_size(void)
+{
+    QEMU_BUILD_BUG_ON(sizeof(UfsReg) != 0x104);
+    QEMU_BUILD_BUG_ON(sizeof(DeviceDescriptor) != 89);
+    QEMU_BUILD_BUG_ON(sizeof(GeometryDescriptor) != 87);
+    QEMU_BUILD_BUG_ON(sizeof(UnitDescriptor) != 45);
+    QEMU_BUILD_BUG_ON(sizeof(RpmbUnitDescriptor) != 35);
+    QEMU_BUILD_BUG_ON(sizeof(PowerParametersDescriptor) != 98);
+    QEMU_BUILD_BUG_ON(sizeof(InterconnectDescriptor) != 6);
+    QEMU_BUILD_BUG_ON(sizeof(StringDescriptor) != 254);
+    QEMU_BUILD_BUG_ON(sizeof(DeviceHealthDescriptor) != 45);
+    QEMU_BUILD_BUG_ON(sizeof(Flags) != 0x13);
+    QEMU_BUILD_BUG_ON(sizeof(UtpUpiuHeader) != 12);
+    QEMU_BUILD_BUG_ON(sizeof(UtpUpiuQuery) != 276);
+    QEMU_BUILD_BUG_ON(sizeof(UtpUpiuCmd) != 20);
+    QEMU_BUILD_BUG_ON(sizeof(UtpUpiuReq) != 288);
+    QEMU_BUILD_BUG_ON(sizeof(UfshcdSgEntry) != 16);
+    QEMU_BUILD_BUG_ON(sizeof(RequestDescHeader) != 16);
+    QEMU_BUILD_BUG_ON(sizeof(UtpTransferReqDesc) != 32);
+    QEMU_BUILD_BUG_ON(sizeof(UtpTaskReqDesc) != 80);
+    QEMU_BUILD_BUG_ON(sizeof(UtpCmdRsp) != 40);
+    QEMU_BUILD_BUG_ON(sizeof(UtpUpiuRsp) != 288);
+}
+#endif
index abdc1ef103f9f7bf9a6535468734c258d0d4c102..b70a0b95ff5ae367ed7f98483ec8d1d1b6274530 100644 (file)
@@ -114,6 +114,7 @@ extern bool pci_available;
 #define PCI_DEVICE_ID_REDHAT_NVME        0x0010
 #define PCI_DEVICE_ID_REDHAT_PVPANIC     0x0011
 #define PCI_DEVICE_ID_REDHAT_ACPI_ERST   0x0012
+#define PCI_DEVICE_ID_REDHAT_UFS         0x0013
 #define PCI_DEVICE_ID_REDHAT_QXL         0x0100
 
 #define FMT_PCIBUS                      PRIx64
index e4386ebb20385437b4b9ceec5bdbb8be0d7bb3e7..85469b9b538496ff3e6dfbb2ff2ce168ecc34b3d 100644 (file)
@@ -26,6 +26,7 @@
 #define PCI_CLASS_STORAGE_SATA           0x0106
 #define PCI_CLASS_STORAGE_SAS            0x0107
 #define PCI_CLASS_STORAGE_EXPRESS        0x0108
+#define PCI_CLASS_STORAGE_UFS            0x0109
 #define PCI_CLASS_STORAGE_OTHER          0x0180
 
 #define PCI_BASE_CLASS_NETWORK           0x02
index a5d720d9a04bf48d13c6e7983bc45f23fe3b8f7b..fa18a3756d8b7678d3661cac6c5838d8ca9e53ac 100644 (file)
 QIOChannel *qio_channel_new_fd(int fd,
                                Error **errp);
 
+/**
+ * qio_channel_util_set_aio_fd_handler:
+ * @read_fd: the file descriptor for the read handler
+ * @read_ctx: the AioContext for the read handler
+ * @io_read: the read handler
+ * @write_fd: the file descriptor for the write handler
+ * @write_ctx: the AioContext for the write handler
+ * @io_write: the write handler
+ * @opaque: the opaque argument to the read and write handlers
+ *
+ * Set the read and write handlers when @read_ctx and @write_ctx are non-NULL,
+ * respectively. To leave a handler in its current state, pass a NULL
+ * AioContext. To clear a handler, pass a non-NULL AioContext and a NULL
+ * handler.
+ */
+void qio_channel_util_set_aio_fd_handler(int read_fd,
+                                         AioContext *read_ctx,
+                                         IOHandler *io_read,
+                                         int write_fd,
+                                         AioContext *write_ctx,
+                                         IOHandler *io_write,
+                                         void *opaque);
+
 #endif /* QIO_CHANNEL_UTIL_H */
index 229bf36910f5567cd25d79bba66a3837c0bfff39..5f9dbaab65b09b6dd36c9a298905cc16f9b2674d 100644 (file)
@@ -81,9 +81,11 @@ struct QIOChannel {
     Object parent;
     unsigned int features; /* bitmask of QIOChannelFeatures */
     char *name;
-    AioContext *ctx;
+    AioContext *read_ctx;
     Coroutine *read_coroutine;
+    AioContext *write_ctx;
     Coroutine *write_coroutine;
+    bool follow_coroutine_ctx;
 #ifdef _WIN32
     HANDLE event; /* For use with GSource on Win32 */
 #endif
@@ -140,8 +142,9 @@ struct QIOChannelClass {
                      int whence,
                      Error **errp);
     void (*io_set_aio_fd_handler)(QIOChannel *ioc,
-                                  AioContext *ctx,
+                                  AioContext *read_ctx,
                                   IOHandler *io_read,
+                                  AioContext *write_ctx,
                                   IOHandler *io_write,
                                   void *opaque);
     int (*io_flush)(QIOChannel *ioc,
@@ -498,6 +501,21 @@ int qio_channel_set_blocking(QIOChannel *ioc,
                              bool enabled,
                              Error **errp);
 
+/**
+ * qio_channel_set_follow_coroutine_ctx:
+ * @ioc: the channel object
+ * @enabled: whether or not to follow the coroutine's AioContext
+ *
+ * If @enabled is true, calls to qio_channel_yield() use the current
+ * coroutine's AioContext. Usually this is desirable.
+ *
+ * If @enabled is false, calls to qio_channel_yield() use the global iohandler
+ * AioContext. This may be used by coroutines that run in the main loop and
+ * do not wish to respond to I/O during nested event loops. This is the
+ * default for compatibility with code that is not aware of AioContexts.
+ */
+void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled);
+
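+/*
+ * Usage sketch: a coroutine-based server, such as the NBD server in this
+ * patch, enables this right after making the channel non-blocking:
+ *
+ *     qio_channel_set_blocking(ioc, false, NULL);
+ *     qio_channel_set_follow_coroutine_ctx(ioc, true);
+ */
+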
 /**
  * qio_channel_close:
  * @ioc: the channel object
@@ -703,41 +721,6 @@ GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                       GDestroyNotify notify,
                                       GMainContext *context);
 
-/**
- * qio_channel_attach_aio_context:
- * @ioc: the channel object
- * @ctx: the #AioContext to set the handlers on
- *
- * Request that qio_channel_yield() sets I/O handlers on
- * the given #AioContext.  If @ctx is %NULL, qio_channel_yield()
- * uses QEMU's main thread event loop.
- *
- * You can move a #QIOChannel from one #AioContext to another even if
- * I/O handlers are set for a coroutine.  However, #QIOChannel provides
- * no synchronization between the calls to qio_channel_yield() and
- * qio_channel_attach_aio_context().
- *
- * Therefore you should first call qio_channel_detach_aio_context()
- * to ensure that the coroutine is not entered concurrently.  Then,
- * while the coroutine has yielded, call qio_channel_attach_aio_context(),
- * and then aio_co_schedule() to place the coroutine on the new
- * #AioContext.  The calls to qio_channel_detach_aio_context()
- * and qio_channel_attach_aio_context() should be protected with
- * aio_context_acquire() and aio_context_release().
- */
-void qio_channel_attach_aio_context(QIOChannel *ioc,
-                                    AioContext *ctx);
-
-/**
- * qio_channel_detach_aio_context:
- * @ioc: the channel object
- *
- * Disable any I/O handlers set by qio_channel_yield().  With the
- * help of aio_co_schedule(), this allows moving a coroutine that was
- * paused by qio_channel_yield() to another context.
- */
-void qio_channel_detach_aio_context(QIOChannel *ioc);
-
 /**
  * qio_channel_yield:
  * @ioc: the channel object
@@ -785,8 +768,9 @@ void qio_channel_wait(QIOChannel *ioc,
 /**
  * qio_channel_set_aio_fd_handler:
  * @ioc: the channel object
- * @ctx: the AioContext to set the handlers on
+ * @read_ctx: the AioContext to set the read handler on, or NULL
  * @io_read: the read handler
+ * @write_ctx: the AioContext to set the write handler on, or NULL
  * @io_write: the write handler
  * @opaque: the opaque value passed to the handler
  *
@@ -794,10 +778,17 @@ void qio_channel_wait(QIOChannel *ioc,
  * be used by channel implementations to forward the handlers
  * to another channel (e.g. from #QIOChannelTLS to the
  * underlying socket).
+ *
+ * When @read_ctx is NULL, don't touch the read handler. When @write_ctx is
+ * NULL, don't touch the write handler. Note that setting the read handler
+ * clears the write handler, and vice versa, if they share the same AioContext.
+ * Therefore the caller must pass both handlers together when sharing the same
+ * AioContext.
  */
 void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
-                                    AioContext *ctx,
+                                    AioContext *read_ctx,
                                     IOHandler *io_read,
+                                    AioContext *write_ctx,
                                     IOHandler *io_write,
                                     void *opaque);
 
index b1c1cda8862727d828fbc8b770af2acb99df37a4..64ad70101576ccf60ab6d15e7cc03c92008c068d 100644 (file)
@@ -43,6 +43,7 @@ typedef struct {
     unsigned int in_flight; /* atomic */
 
     /* Protected by ctx lock */
+    bool in_qio_channel_yield;
     bool wait_idle;
     VuDev vu_dev;
     QIOChannel *ioc; /* The I/O channel with the client */
index 6a8bad556a4fafc112bdceda3528ea21101468ac..9b984519121f7a4a3ed19f1962d3ba2d2c58161e 100644 (file)
 #define MODE_PAGE_FLEXIBLE_DISK_GEOMETRY      0x05
 #define MODE_PAGE_CACHING                     0x08
 #define MODE_PAGE_AUDIO_CTL                   0x0e
+#define MODE_PAGE_CONTROL                     0x0a
 #define MODE_PAGE_POWER                       0x1a
 #define MODE_PAGE_FAULT_FAIL                  0x1c
 #define MODE_PAGE_TO_PROTECT                  0x1d
index 7ed726c8025b9b8ced6a1719173f06b7f969dbdf..6d5f64e146d45c7ed330063711b5295816591a38 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "qemu/osdep.h"
 #include "io/channel-command.h"
+#include "io/channel-util.h"
 #include "io/channel-watch.h"
 #include "qapi/error.h"
 #include "qemu/module.h"
@@ -331,14 +332,17 @@ static int qio_channel_command_close(QIOChannel *ioc,
 
 
 static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc,
-                                                   AioContext *ctx,
+                                                   AioContext *read_ctx,
                                                    IOHandler *io_read,
+                                                   AioContext *write_ctx,
                                                    IOHandler *io_write,
                                                    void *opaque)
 {
     QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc);
-    aio_set_fd_handler(ctx, cioc->readfd, io_read, NULL, NULL, NULL, opaque);
-    aio_set_fd_handler(ctx, cioc->writefd, NULL, io_write, NULL, NULL, opaque);
+
+    qio_channel_util_set_aio_fd_handler(cioc->readfd, read_ctx, io_read,
+                                        cioc->writefd, write_ctx, io_write,
+                                        opaque);
 }
 
 
index 8b5821f4526ed8850a77a9fad4bf17aeb792bc11..4a12c618860116cbdf61c8d64db0e469f8f6a75b 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "qemu/osdep.h"
 #include "io/channel-file.h"
+#include "io/channel-util.h"
 #include "io/channel-watch.h"
 #include "qapi/error.h"
 #include "qemu/module.h"
@@ -192,13 +193,17 @@ static int qio_channel_file_close(QIOChannel *ioc,
 
 
 static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc,
-                                                AioContext *ctx,
+                                                AioContext *read_ctx,
                                                 IOHandler *io_read,
+                                                AioContext *write_ctx,
                                                 IOHandler *io_write,
                                                 void *opaque)
 {
     QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc);
-    aio_set_fd_handler(ctx, fioc->fd, io_read, io_write, NULL, NULL, opaque);
+
+    qio_channel_util_set_aio_fd_handler(fioc->fd, read_ctx, io_read,
+                                        fioc->fd, write_ctx, io_write,
+                                        opaque);
 }
 
 static GSource *qio_channel_file_create_watch(QIOChannel *ioc,
index 4fafdb770da1fa0ec9e544c402daacc9c031a708..ef995863483cc6f7eb30bdaea15b95d6514df0f3 100644 (file)
@@ -128,8 +128,9 @@ qio_channel_null_close(QIOChannel *ioc,
 
 static void
 qio_channel_null_set_aio_fd_handler(QIOChannel *ioc G_GNUC_UNUSED,
-                                    AioContext *ctx G_GNUC_UNUSED,
+                                    AioContext *read_ctx G_GNUC_UNUSED,
                                     IOHandler *io_read G_GNUC_UNUSED,
+                                    AioContext *write_ctx G_GNUC_UNUSED,
                                     IOHandler *io_write G_GNUC_UNUSED,
                                     void *opaque G_GNUC_UNUSED)
 {
index d99945ebec494e32d023f7a567d69433b26cbcb5..02ffb51e995747bddd6c824423b67be311ce2dc6 100644 (file)
@@ -22,6 +22,7 @@
 #include "qapi/qapi-visit-sockets.h"
 #include "qemu/module.h"
 #include "io/channel-socket.h"
+#include "io/channel-util.h"
 #include "io/channel-watch.h"
 #include "trace.h"
 #include "qapi/clone-visitor.h"
@@ -893,13 +894,17 @@ qio_channel_socket_shutdown(QIOChannel *ioc,
 }
 
 static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc,
-                                                  AioContext *ctx,
+                                                  AioContext *read_ctx,
                                                   IOHandler *io_read,
+                                                  AioContext *write_ctx,
                                                   IOHandler *io_write,
                                                   void *opaque)
 {
     QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
-    aio_set_fd_handler(ctx, sioc->fd, io_read, io_write, NULL, NULL, opaque);
+
+    qio_channel_util_set_aio_fd_handler(sioc->fd, read_ctx, io_read,
+                                        sioc->fd, write_ctx, io_write,
+                                        opaque);
 }
 
 static GSource *qio_channel_socket_create_watch(QIOChannel *ioc,
index 847d5297c339a690efc1b2f74746be5db2f627ab..58fe1aceeeafd473dfb3fde267f18eccee6bdb65 100644 (file)
@@ -388,14 +388,16 @@ static int qio_channel_tls_close(QIOChannel *ioc,
 }
 
 static void qio_channel_tls_set_aio_fd_handler(QIOChannel *ioc,
-                                               AioContext *ctx,
+                                               AioContext *read_ctx,
                                                IOHandler *io_read,
+                                               AioContext *write_ctx,
                                                IOHandler *io_write,
                                                void *opaque)
 {
     QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
 
-    qio_channel_set_aio_fd_handler(tioc->master, ctx, io_read, io_write, opaque);
+    qio_channel_set_aio_fd_handler(tioc->master, read_ctx, io_read,
+            write_ctx, io_write, opaque);
 }
 
 typedef struct QIOChannelTLSSource QIOChannelTLSSource;
index 848a7a43d671daa6d7257e6fa525ea55a06ebc09..4b340d46d760f245c2622e6c93f95867f449f47a 100644 (file)
@@ -36,3 +36,27 @@ QIOChannel *qio_channel_new_fd(int fd,
     }
     return ioc;
 }
+
+
+void qio_channel_util_set_aio_fd_handler(int read_fd,
+                                         AioContext *read_ctx,
+                                         IOHandler *io_read,
+                                         int write_fd,
+                                         AioContext *write_ctx,
+                                         IOHandler *io_write,
+                                         void *opaque)
+{
+    if (read_fd == write_fd && read_ctx == write_ctx) {
+        aio_set_fd_handler(read_ctx, read_fd, io_read, io_write,
+                NULL, NULL, opaque);
+    } else {
+        if (read_ctx) {
+            aio_set_fd_handler(read_ctx, read_fd, io_read, NULL,
+                    NULL, NULL, opaque);
+        }
+        if (write_ctx) {
+            aio_set_fd_handler(write_ctx, write_fd, NULL, io_write,
+                    NULL, NULL, opaque);
+        }
+    }
+}
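+
+/*
+ * Usage sketch (hypothetical caller): a socket channel uses a single fd for
+ * both directions, so when read and write share an AioContext both handlers
+ * are registered in one call:
+ *
+ *     qio_channel_util_set_aio_fd_handler(fd, ctx, io_read,
+ *                                         fd, ctx, io_write, opaque);
+ *
+ * A NULL AioContext for one direction leaves that direction's handler
+ * untouched.
+ */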
index 72f0066af55a430d3912a2b04ee6124cdcffc7d5..86c5834510ff1af08c5756257a71df11c04756bd 100644 (file)
@@ -365,6 +365,12 @@ int qio_channel_set_blocking(QIOChannel *ioc,
 }
 
 
+void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled)
+{
+    ioc->follow_coroutine_ctx = enabled;
+}
+
+
 int qio_channel_close(QIOChannel *ioc,
                       Error **errp)
 {
@@ -388,14 +394,16 @@ GSource *qio_channel_create_watch(QIOChannel *ioc,
 
 
 void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
-                                    AioContext *ctx,
+                                    AioContext *read_ctx,
                                     IOHandler *io_read,
+                                    AioContext *write_ctx,
                                     IOHandler *io_write,
                                     void *opaque)
 {
     QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
 
-    klass->io_set_aio_fd_handler(ioc, ctx, io_read, io_write, opaque);
+    klass->io_set_aio_fd_handler(ioc, read_ctx, io_read, write_ctx, io_write,
+            opaque);
 }
 
 guint qio_channel_add_watch_full(QIOChannel *ioc,
@@ -542,56 +550,101 @@ static void qio_channel_restart_write(void *opaque)
     aio_co_wake(co);
 }
 
-static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc)
+static void coroutine_fn
+qio_channel_set_fd_handlers(QIOChannel *ioc, GIOCondition condition)
 {
-    IOHandler *rd_handler = NULL, *wr_handler = NULL;
-    AioContext *ctx;
+    AioContext *ctx = ioc->follow_coroutine_ctx ?
+        qemu_coroutine_get_aio_context(qemu_coroutine_self()) :
+        iohandler_get_aio_context();
+    AioContext *read_ctx = NULL;
+    IOHandler *io_read = NULL;
+    AioContext *write_ctx = NULL;
+    IOHandler *io_write = NULL;
 
-    if (ioc->read_coroutine) {
-        rd_handler = qio_channel_restart_read;
-    }
-    if (ioc->write_coroutine) {
-        wr_handler = qio_channel_restart_write;
+    if (condition == G_IO_IN) {
+        ioc->read_coroutine = qemu_coroutine_self();
+        ioc->read_ctx = ctx;
+        read_ctx = ctx;
+        io_read = qio_channel_restart_read;
+
+        /*
+         * Thread safety: if the other coroutine is set and its AioContext
+         * matches ours, then there is mutual exclusion between read and write
+         * because they share a single thread and it's safe to set both read
+         * and write fd handlers here. If the AioContext does not match ours,
+         * then both threads may run in parallel but there is no shared state
+         * to worry about.
+         */
+        if (ioc->write_coroutine && ioc->write_ctx == ctx) {
+            write_ctx = ctx;
+            io_write = qio_channel_restart_write;
+        }
+    } else if (condition == G_IO_OUT) {
+        ioc->write_coroutine = qemu_coroutine_self();
+        ioc->write_ctx = ctx;
+        write_ctx = ctx;
+        io_write = qio_channel_restart_write;
+        if (ioc->read_coroutine && ioc->read_ctx == ctx) {
+            read_ctx = ctx;
+            io_read = qio_channel_restart_read;
+        }
+    } else {
+        abort();
     }
 
-    ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
-    qio_channel_set_aio_fd_handler(ioc, ctx, rd_handler, wr_handler, ioc);
+    qio_channel_set_aio_fd_handler(ioc, read_ctx, io_read,
+            write_ctx, io_write, ioc);
 }
 
-void qio_channel_attach_aio_context(QIOChannel *ioc,
-                                    AioContext *ctx)
+static void coroutine_fn
+qio_channel_clear_fd_handlers(QIOChannel *ioc, GIOCondition condition)
 {
-    assert(!ioc->read_coroutine);
-    assert(!ioc->write_coroutine);
-    ioc->ctx = ctx;
-}
+    AioContext *read_ctx = NULL;
+    IOHandler *io_read = NULL;
+    AioContext *write_ctx = NULL;
+    IOHandler *io_write = NULL;
+    AioContext *ctx;
 
-void qio_channel_detach_aio_context(QIOChannel *ioc)
-{
-    ioc->read_coroutine = NULL;
-    ioc->write_coroutine = NULL;
-    qio_channel_set_aio_fd_handlers(ioc);
-    ioc->ctx = NULL;
+    if (condition == G_IO_IN) {
+        ctx = ioc->read_ctx;
+        read_ctx = ctx;
+        io_read = NULL;
+        if (ioc->write_coroutine && ioc->write_ctx == ctx) {
+            write_ctx = ctx;
+            io_write = qio_channel_restart_write;
+        }
+    } else if (condition == G_IO_OUT) {
+        ctx = ioc->write_ctx;
+        write_ctx = ctx;
+        io_write = NULL;
+        if (ioc->read_coroutine && ioc->read_ctx == ctx) {
+            read_ctx = ctx;
+            io_read = qio_channel_restart_read;
+        }
+    } else {
+        abort();
+    }
+
+    qio_channel_set_aio_fd_handler(ioc, read_ctx, io_read,
+            write_ctx, io_write, ioc);
 }
 
 void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                     GIOCondition condition)
 {
-    AioContext *ioc_ctx = ioc->ctx ?: qemu_get_aio_context();
+    AioContext *ioc_ctx;
 
     assert(qemu_in_coroutine());
-    assert(in_aio_context_home_thread(ioc_ctx));
+    ioc_ctx = qemu_coroutine_get_aio_context(qemu_coroutine_self());
 
     if (condition == G_IO_IN) {
         assert(!ioc->read_coroutine);
-        ioc->read_coroutine = qemu_coroutine_self();
     } else if (condition == G_IO_OUT) {
         assert(!ioc->write_coroutine);
-        ioc->write_coroutine = qemu_coroutine_self();
     } else {
         abort();
     }
-    qio_channel_set_aio_fd_handlers(ioc);
+    qio_channel_set_fd_handlers(ioc, condition);
     qemu_coroutine_yield();
     assert(in_aio_context_home_thread(ioc_ctx));
 
@@ -599,11 +652,10 @@ void coroutine_fn qio_channel_yield(QIOChannel *ioc,
      * through the aio_fd_handlers. */
     if (condition == G_IO_IN) {
         assert(ioc->read_coroutine == NULL);
-        qio_channel_set_aio_fd_handlers(ioc);
     } else if (condition == G_IO_OUT) {
         assert(ioc->write_coroutine == NULL);
-        qio_channel_set_aio_fd_handlers(ioc);
     }
+    qio_channel_clear_fd_handlers(ioc, condition);
 }
 
 void qio_channel_wake_read(QIOChannel *ioc)
@@ -653,6 +705,10 @@ static void qio_channel_finalize(Object *obj)
 {
     QIOChannel *ioc = QIO_CHANNEL(obj);
 
+    /* Must not have coroutines in qio_channel_yield() */
+    assert(!ioc->read_coroutine);
+    assert(!ioc->write_coroutine);
+
     g_free(ioc->name);
 
 #ifdef _WIN32
index b41c305bd967a174155beacce56d8e43882b7ea7..b753286414a7ec2f3c4de0e20f1478ac35224091 100644 (file)
@@ -138,12 +138,14 @@ static void iothread_instance_finalize(Object *obj)
     qemu_sem_destroy(&iothread->init_done_sem);
 }
 
-static void iothread_init_gcontext(IOThread *iothread)
+static void iothread_init_gcontext(IOThread *iothread, const char *thread_name)
 {
     GSource *source;
+    g_autofree char *name = g_strdup_printf("%s aio-context", thread_name);
 
     iothread->worker_context = g_main_context_new();
     source = aio_get_g_source(iothread_get_aio_context(iothread));
+    g_source_set_name(source, name);
     g_source_attach(source, iothread->worker_context);
     g_source_unref(source);
     iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
@@ -180,7 +182,7 @@ static void iothread_init(EventLoopBase *base, Error **errp)
 {
     Error *local_error = NULL;
     IOThread *iothread = IOTHREAD(base);
-    char *thread_name;
+    g_autofree char *thread_name = NULL;
 
     iothread->stopping = false;
     iothread->running = true;
@@ -189,11 +191,14 @@ static void iothread_init(EventLoopBase *base, Error **errp)
         return;
     }
 
+    thread_name = g_strdup_printf("IO %s",
+                        object_get_canonical_path_component(OBJECT(base)));
+
     /*
      * Init one GMainContext for the iothread unconditionally, even if
      * it's not used
      */
-    iothread_init_gcontext(iothread);
+    iothread_init_gcontext(iothread, thread_name);
 
     iothread_set_aio_context_params(base, &local_error);
     if (local_error) {
@@ -206,11 +211,8 @@ static void iothread_init(EventLoopBase *base, Error **errp)
     /* This assumes we are called from a thread with useful CPU affinity for us
      * to inherit.
      */
-    thread_name = g_strdup_printf("IO %s",
-                        object_get_canonical_path_component(OBJECT(base)));
     qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                        iothread, QEMU_THREAD_JOINABLE);
-    g_free(thread_name);
 
     /* Wait for initialization to complete */
     while (iothread->thread_id == -1) {
index bf9831c7156f5bda1a1735d1b8f28402d4319e1f..0e31bdfabf13adece9fc2bf33d1b8efdc0605634 100644 (file)
@@ -3287,6 +3287,7 @@ if have_system
     'hw/ssi',
     'hw/timer',
     'hw/tpm',
+    'hw/ufs',
     'hw/usb',
     'hw/vfio',
     'hw/virtio',
index b7374363c3fc932b259a3adec97dbd005e92a8f7..fff8d870942f26e500045038390d8dbd00b02ca3 100644 (file)
@@ -158,8 +158,9 @@ qio_channel_block_close(QIOChannel *ioc,
 
 static void
 qio_channel_block_set_aio_fd_handler(QIOChannel *ioc,
-                                     AioContext *ctx,
+                                     AioContext *read_ctx,
                                      IOHandler *io_read,
+                                     AioContext *write_ctx,
                                      IOHandler *io_write,
                                      void *opaque)
 {
index ca430d319d9dd4ae8ff71b350fc0414d52a63981..a2a3db35b1d1481adb06594ff387de642d0603c7 100644 (file)
@@ -3103,22 +3103,23 @@ static GSource *qio_channel_rdma_create_watch(QIOChannel *ioc,
 }
 
 static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
-                                                  AioContext *ctx,
-                                                  IOHandler *io_read,
-                                                  IOHandler *io_write,
-                                                  void *opaque)
+                                                AioContext *read_ctx,
+                                                IOHandler *io_read,
+                                                AioContext *write_ctx,
+                                                IOHandler *io_write,
+                                                void *opaque)
 {
     QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
     if (io_read) {
-        aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, io_read,
-                           io_write, NULL, NULL, opaque);
-        aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, io_read,
-                           io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(read_ctx, rioc->rdmain->recv_comp_channel->fd,
+                           io_read, io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(read_ctx, rioc->rdmain->send_comp_channel->fd,
+                           io_read, io_write, NULL, NULL, opaque);
     } else {
-        aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, io_read,
-                           io_write, NULL, NULL, opaque);
-        aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, io_read,
-                           io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(write_ctx, rioc->rdmaout->recv_comp_channel->fd,
+                           io_read, io_write, NULL, NULL, opaque);
+        aio_set_fd_handler(write_ctx, rioc->rdmaout->send_comp_channel->fd,
+                           io_read, io_write, NULL, NULL, opaque);
     }
 }
 
index 258ef81ae9f857c6c47945ffecc40e3a803671db..53a6549914c47fcb84dfe857de3c960f048b7c89 100644 (file)
@@ -146,8 +146,7 @@ static int nbd_connect(QIOChannelSocket *sioc, SocketAddress *addr,
         return 0;
     }
 
-    ret = nbd_receive_negotiate(NULL, QIO_CHANNEL(sioc), tlscreds,
-                                tlshostname,
+    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), tlscreds, tlshostname,
                                 outioc, info, errp);
     if (ret < 0) {
         /*
index 479208d5d9d484bb859815f892ccfe7475c7f697..bd7e20013669cd6b179e99aaba84fed0f25d4172 100644 (file)
@@ -877,8 +877,7 @@ static int nbd_list_meta_contexts(QIOChannel *ioc,
  * Returns: negative errno: failure talking to server
  *          non-negative: enum NBDMode describing server abilities
  */
-static int nbd_start_negotiate(AioContext *aio_context, QIOChannel *ioc,
-                               QCryptoTLSCreds *tlscreds,
+static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                                const char *hostname, QIOChannel **outioc,
                                bool structured_reply, bool *zeroes,
                                Error **errp)
@@ -946,10 +945,6 @@ static int nbd_start_negotiate(AioContext *aio_context, QIOChannel *ioc,
                     return -EINVAL;
                 }
                 ioc = *outioc;
-                if (aio_context) {
-                    qio_channel_set_blocking(ioc, false, NULL);
-                    qio_channel_attach_aio_context(ioc, aio_context);
-                }
             } else {
                 error_setg(errp, "Server does not support STARTTLS");
                 return -EINVAL;
@@ -1014,8 +1009,7 @@ static int nbd_negotiate_finish_oldstyle(QIOChannel *ioc, NBDExportInfo *info,
  * Returns: negative errno: failure talking to server
  *          0: server is connected
  */
-int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
-                          QCryptoTLSCreds *tlscreds,
+int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                           const char *hostname, QIOChannel **outioc,
                           NBDExportInfo *info, Error **errp)
 {
@@ -1027,7 +1021,7 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
     assert(info->name && strlen(info->name) <= NBD_MAX_STRING_SIZE);
     trace_nbd_receive_negotiate_name(info->name);
 
-    result = nbd_start_negotiate(aio_context, ioc, tlscreds, hostname, outioc,
+    result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
                                  info->structured_reply, &zeroes, errp);
     if (result < 0) {
         return result;
@@ -1150,7 +1144,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
     QIOChannel *sioc = NULL;
 
     *info = NULL;
-    result = nbd_start_negotiate(NULL, ioc, tlscreds, hostname, &sioc, true,
+    result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc, true,
                                  NULL, errp);
     if (tlscreds && sioc) {
         ioc = sioc;
index 8486b64b15d234c1bafff51a31f006885b174918..b5f93a20c9c1bf39ece033ea2f552046f98076b7 100644 (file)
@@ -1333,6 +1333,7 @@ static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
      */
 
     qio_channel_set_blocking(client->ioc, false, NULL);
+    qio_channel_set_follow_coroutine_ctx(client->ioc, true);
 
     trace_nbd_negotiate_begin();
     memcpy(buf, "NBDMAGIC", 8);
@@ -1352,11 +1353,6 @@ static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
         return ret;
     }
 
-    /* Attach the channel to the same AioContext as the export */
-    if (client->exp && client->exp->common.ctx) {
-        qio_channel_attach_aio_context(client->ioc, client->exp->common.ctx);
-    }
-
     assert(!client->optlen);
     trace_nbd_negotiate_success();
 
@@ -1465,7 +1461,6 @@ void nbd_client_put(NBDClient *client)
          */
         assert(client->closing);
 
-        qio_channel_detach_aio_context(client->ioc);
         object_unref(OBJECT(client->sioc));
         object_unref(OBJECT(client->ioc));
         if (client->tlscreds) {
@@ -1544,8 +1539,6 @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
     exp->common.ctx = ctx;
 
     QTAILQ_FOREACH(client, &exp->clients, next) {
-        qio_channel_attach_aio_context(client->ioc, ctx);
-
         assert(client->nb_requests == 0);
         assert(client->recv_coroutine == NULL);
         assert(client->send_coroutine == NULL);
@@ -1555,14 +1548,9 @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
 static void blk_aio_detach(void *opaque)
 {
     NBDExport *exp = opaque;
-    NBDClient *client;
 
     trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);
 
-    QTAILQ_FOREACH(client, &exp->clients, next) {
-        qio_channel_detach_aio_context(client->ioc);
-    }
-
     exp->common.ctx = NULL;
 }
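Condensing the hunks above, the attach/detach dance is replaced by a single opt-in at negotiation time (a sketch; names as in nbd/server.c):

    /* Once per client connection: */
    qio_channel_set_blocking(client->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(client->ioc, true);

    /*
     * From here on, each qio_channel_yield() registers the channel's fd
     * handlers in the yielding coroutine's current AioContext, so
     * blk_aio_attached()/blk_aio_detach() no longer need to call
     * qio_channel_attach_aio_context()/qio_channel_detach_aio_context().
     */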
 
index aaccaa33184ad3a57ddda4ed3c0ada0b2fb6b741..30eeb6f3c75ab2d6c2bcdf347806f28001e4bc91 100644 (file)
@@ -73,8 +73,6 @@
 
 #define MBR_SIZE 512
 
-static char *srcpath;
-static SocketAddress *saddr;
 static int persistent = 0;
 static enum { RUNNING, TERMINATE, TERMINATED } state;
 static int shared = 1;
@@ -253,6 +251,29 @@ static int qemu_nbd_client_list(SocketAddress *saddr, QCryptoTLSCreds *tls,
 }
 
 
+struct NbdClientOpts {
+    char *device;
+    char *srcpath;
+    SocketAddress *saddr;
+    int old_stderr;
+    bool fork_process;
+    bool verbose;
+};
+
+static void nbd_client_release_pipe(int old_stderr)
+{
+    /* Restore stderr; the parent qemu-nbd process then sees EOF and exits. */
+    if (dup2(old_stderr, STDERR_FILENO) < 0) {
+        error_report("Could not release pipe to parent: %s",
+                     strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+    if (old_stderr != STDOUT_FILENO && close(old_stderr) < 0) {
+        error_report("Could not release qemu-nbd: %s", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+}
+
 #if HAVE_NBD_DEVICE
 static void *show_parts(void *arg)
 {
@@ -271,12 +292,6 @@ static void *show_parts(void *arg)
     return NULL;
 }
 
-struct NbdClientOpts {
-    char *device;
-    bool fork_process;
-    bool verbose;
-};
-
 static void *nbd_client_thread(void *arg)
 {
     struct NbdClientOpts *opts = arg;
@@ -289,14 +304,14 @@ static void *nbd_client_thread(void *arg)
 
     sioc = qio_channel_socket_new();
     if (qio_channel_socket_connect_sync(sioc,
-                                        saddr,
+                                        opts->saddr,
                                         &local_error) < 0) {
         error_report_err(local_error);
         goto out;
     }
 
-    if (nbd_receive_negotiate(NULL, QIO_CHANNEL(sioc),
-                              NULL, NULL, NULL, &info, &local_error) < 0) {
+    if (nbd_receive_negotiate(QIO_CHANNEL(sioc), NULL, NULL, NULL,
+                              &info, &local_error) < 0) {
         if (local_error) {
             error_report_err(local_error);
         }
@@ -320,14 +335,9 @@ static void *nbd_client_thread(void *arg)
 
     if (opts->verbose && !opts->fork_process) {
         fprintf(stderr, "NBD device %s is now connected to %s\n",
-                opts->device, srcpath);
+                opts->device, opts->srcpath);
     } else {
-        /* Close stderr so that the qemu-nbd process exits.  */
-        if (dup2(STDOUT_FILENO, STDERR_FILENO) < 0) {
-            error_report("Could not set stderr to /dev/null: %s",
-                         strerror(errno));
-            exit(EXIT_FAILURE);
-        }
+        nbd_client_release_pipe(opts->old_stderr);
     }
 
     if (nbd_client(fd) < 0) {
@@ -519,7 +529,6 @@ int main(int argc, char **argv)
     const char *bindto = NULL;
     const char *port = NULL;
     char *sockpath = NULL;
-    char *device = NULL;
     QemuOpts *sn_opts = NULL;
     const char *sn_id_or_name = NULL;
     const char *sopt = "hVb:o:p:rsnc:dvk:e:f:tl:x:T:D:AB:L";
@@ -582,16 +591,19 @@ int main(int argc, char **argv)
     const char *tlshostname = NULL;
     bool imageOpts = false;
     bool writethrough = false; /* Client will flush as needed. */
-    bool verbose = false;
-    bool fork_process = false;
     bool list = false;
     unsigned socket_activation;
     const char *pid_file_name = NULL;
     const char *selinux_label = NULL;
     BlockExportOptions *export_opts;
-#if HAVE_NBD_DEVICE
-    struct NbdClientOpts opts;
-#endif
+    struct NbdClientOpts opts = {
+        .fork_process = false,
+        .verbose = false,
+        .device = NULL,
+        .srcpath = NULL,
+        .saddr = NULL,
+        .old_stderr = STDOUT_FILENO,
+    };
 
 #ifdef CONFIG_POSIX
     os_setup_early_signal_handling();
@@ -719,7 +731,7 @@ int main(int argc, char **argv)
             disconnect = true;
             break;
         case 'c':
-            device = optarg;
+            opts.device = optarg;
             break;
         case 'e':
             if (qemu_strtoi(optarg, NULL, 0, &shared) < 0 ||
@@ -750,7 +762,7 @@ int main(int argc, char **argv)
             }
             break;
         case 'v':
-            verbose = true;
+            opts.verbose = true;
             break;
         case 'V':
             version(argv[0]);
@@ -782,7 +794,7 @@ int main(int argc, char **argv)
             tlsauthz = optarg;
             break;
         case QEMU_NBD_OPT_FORK:
-            fork_process = true;
+            opts.fork_process = true;
             break;
         case 'L':
             list = true;
@@ -802,12 +814,12 @@ int main(int argc, char **argv)
             exit(EXIT_FAILURE);
         }
         if (export_name || export_description || dev_offset ||
-            device || disconnect || fmt || sn_id_or_name || bitmaps ||
+            opts.device || disconnect || fmt || sn_id_or_name || bitmaps ||
             alloc_depth || seen_aio || seen_discard || seen_cache) {
             error_report("List mode is incompatible with per-device settings");
             exit(EXIT_FAILURE);
         }
-        if (fork_process) {
+        if (opts.fork_process) {
             error_report("List mode is incompatible with forking");
             exit(EXIT_FAILURE);
         }
@@ -832,7 +844,8 @@ int main(int argc, char **argv)
         }
     } else {
         /* Using socket activation - check user didn't use -p etc. */
-        const char *err_msg = socket_activation_validate_opts(device, sockpath,
+        const char *err_msg = socket_activation_validate_opts(opts.device,
+                                                              sockpath,
                                                               bindto, port,
                                                               selinux_label,
                                                               list);
@@ -850,7 +863,7 @@ int main(int argc, char **argv)
     }
 
     if (tlscredsid) {
-        if (device) {
+        if (opts.device) {
             error_report("TLS is not supported with a host device");
             exit(EXIT_FAILURE);
         }
@@ -880,7 +893,7 @@ int main(int argc, char **argv)
 
     if (selinux_label) {
 #ifdef CONFIG_SELINUX
-        if (sockpath == NULL && device == NULL) {
+        if (sockpath == NULL && opts.device == NULL) {
             error_report("--selinux-label is not permitted without --socket");
             exit(EXIT_FAILURE);
         }
@@ -891,13 +904,13 @@ int main(int argc, char **argv)
     }
 
     if (list) {
-        saddr = nbd_build_socket_address(sockpath, bindto, port);
-        return qemu_nbd_client_list(saddr, tlscreds,
+        opts.saddr = nbd_build_socket_address(sockpath, bindto, port);
+        return qemu_nbd_client_list(opts.saddr, tlscreds,
                                     tlshostname ? tlshostname : bindto);
     }
 
 #if !HAVE_NBD_DEVICE
-    if (disconnect || device) {
+    if (disconnect || opts.device) {
         error_report("Kernel /dev/nbdN support not available");
         exit(EXIT_FAILURE);
     }
@@ -919,7 +932,7 @@ int main(int argc, char **argv)
     }
 #endif
 
-    if ((device && !verbose) || fork_process) {
+    if ((opts.device && !opts.verbose) || opts.fork_process) {
 #ifndef WIN32
         g_autoptr(GError) err = NULL;
         int stderr_fd[2];
@@ -944,6 +957,16 @@ int main(int argc, char **argv)
 
             close(stderr_fd[0]);
 
+            /* Remember parent's stderr if we will be restoring it. */
+            if (opts.verbose /* fork_process is set */) {
+                opts.old_stderr = dup(STDERR_FILENO);
+                if (opts.old_stderr < 0) {
+                    error_report("Could not dup original stderr: %s",
+                                 strerror(errno));
+                    exit(EXIT_FAILURE);
+                }
+            }
+
             ret = qemu_daemon(1, 0);
             saved_errno = errno;    /* dup2 will overwrite error below */
 
@@ -1002,9 +1025,9 @@ int main(int argc, char **argv)
 #endif /* WIN32 */
     }
 
-    if (device != NULL && sockpath == NULL) {
+    if (opts.device != NULL && sockpath == NULL) {
         sockpath = g_malloc(128);
-        snprintf(sockpath, 128, SOCKET_PATH, basename(device));
+        snprintf(sockpath, 128, SOCKET_PATH, basename(opts.device));
     }
 
     server = qio_net_listener_new();
@@ -1023,8 +1046,8 @@ int main(int argc, char **argv)
             exit(EXIT_FAILURE);
         }
 #endif
-        saddr = nbd_build_socket_address(sockpath, bindto, port);
-        if (qio_net_listener_open_sync(server, saddr, backlog,
+        opts.saddr = nbd_build_socket_address(sockpath, bindto, port);
+        if (qio_net_listener_open_sync(server, opts.saddr, backlog,
                                        &local_err) < 0) {
             object_unref(OBJECT(server));
             error_report_err(local_err);
@@ -1059,19 +1082,19 @@ int main(int argc, char **argv)
     bdrv_init();
     atexit(qemu_nbd_shutdown);
 
-    srcpath = argv[optind];
+    opts.srcpath = argv[optind];
     if (imageOpts) {
-        QemuOpts *opts;
+        QemuOpts *o;
         if (fmt) {
             error_report("--image-opts and -f are mutually exclusive");
             exit(EXIT_FAILURE);
         }
-        opts = qemu_opts_parse_noisily(&file_opts, srcpath, true);
-        if (!opts) {
+        o = qemu_opts_parse_noisily(&file_opts, opts.srcpath, true);
+        if (!o) {
             qemu_opts_reset(&file_opts);
             exit(EXIT_FAILURE);
         }
-        options = qemu_opts_to_qdict(opts, NULL);
+        options = qemu_opts_to_qdict(o, NULL);
         qemu_opts_reset(&file_opts);
         blk = blk_new_open(NULL, NULL, options, flags, &local_err);
     } else {
@@ -1079,7 +1102,7 @@ int main(int argc, char **argv)
             options = qdict_new();
             qdict_put_str(options, "driver", fmt);
         }
-        blk = blk_new_open(srcpath, NULL, options, flags, &local_err);
+        blk = blk_new_open(opts.srcpath, NULL, options, flags, &local_err);
     }
 
     if (!blk) {
@@ -1145,15 +1168,9 @@ int main(int argc, char **argv)
     blk_exp_add(export_opts, &error_fatal);
     qapi_free_BlockExportOptions(export_opts);
 
-    if (device) {
+    if (opts.device) {
 #if HAVE_NBD_DEVICE
         int ret;
-        opts = (struct NbdClientOpts) {
-            .device = device,
-            .fork_process = fork_process,
-            .verbose = verbose,
-        };
-
         ret = pthread_create(&client_thread, NULL, nbd_client_thread, &opts);
         if (ret != 0) {
             error_report("Failed to create client thread: %s", strerror(ret));
@@ -1179,12 +1196,8 @@ int main(int argc, char **argv)
         exit(EXIT_FAILURE);
     }
 
-    if (fork_process) {
-        if (dup2(STDOUT_FILENO, STDERR_FILENO) < 0) {
-            error_report("Could not set stderr to /dev/null: %s",
-                         strerror(errno));
-            exit(EXIT_FAILURE);
-        }
+    if (opts.fork_process) {
+        nbd_client_release_pipe(opts.old_stderr);
     }
 
     state = RUNNING;
@@ -1203,7 +1216,7 @@ int main(int argc, char **argv)
 
     qemu_opts_del(sn_opts);
 
-    if (device) {
+    if (opts.device) {
         void *ret;
         pthread_join(client_thread, &ret);
         exit(ret != NULL);
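Condensed child-side flow of the reworked --fork stderr handling (a sketch assembled from the hunks above; error handling omitted):

    /* In the forked child, before stderr is redirected to the pipe: */
    if (opts.verbose /* fork_process is set */) {
        opts.old_stderr = dup(STDERR_FILENO);   /* keep the real stderr */
    }
    /* ... daemonize; stderr now feeds the pipe read by the parent ... */

    /* Once the export is ready (or from nbd_client_thread): */
    nbd_client_release_pipe(opts.old_stderr);   /* parent sees EOF, exits */

With old_stderr defaulting to STDOUT_FILENO, the non-verbose path keeps the previous behaviour of pointing stderr at the daemonized stdout.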
index ae44a816e17bb529335404c3f45818065e954366..c6c6347e9b6a5554772908bc4d45f077b9907934 100644 (file)
@@ -735,8 +735,7 @@ static void coroutine_fn prh_co_entry(void *opaque)
 
     qio_channel_set_blocking(QIO_CHANNEL(client->ioc),
                              false, NULL);
-    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc),
-                                   qemu_get_aio_context());
+    qio_channel_set_follow_coroutine_ctx(QIO_CHANNEL(client->ioc), true);
 
     /* A very simple negotiation for future extensibility.  No features
      * are defined so write 0.
@@ -796,7 +795,6 @@ static void coroutine_fn prh_co_entry(void *opaque)
     }
 
 out:
-    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
     object_unref(OBJECT(client->ioc));
     g_free(client);
 }
index 85d980c7553f0dbbfaf4fc1c4bcbb58eb6dc915f..69849c800ed65c53abdfeec31e8831b5292ef560 100755 (executable)
@@ -136,18 +136,18 @@ IMGPROTO=file IMGFMT=qcow2 TEST_IMG_FILE="$TEST_WRAP" \
 $QEMU_IO -c "write -P 0xaa 0 64k" "$TEST_IMG" | _filter_qemu_io
 
 # Allocate individual subclusters in the top image, and not the whole cluster
-$QEMU_IO -c "write -P 0xbb 28K 2K" -c "write -P 0xcc 34K 2K" "$TEST_WRAP" \
+$QEMU_IO -f qcow2 -c "write -P 0xbb 28K 2K" -c "write -P 0xcc 34K 2K" "$TEST_WRAP" \
     | _filter_qemu_io
 
 # Only 2 subclusters should be allocated in the top image at this point
-$QEMU_IMG map "$TEST_WRAP" | _filter_qemu_img_map
+$QEMU_IO -f qcow2 -c map "$TEST_WRAP"
 
 # Actual copy-on-read operation
-$QEMU_IO -C -c "read -P 0xaa 30K 4K" "$TEST_WRAP" | _filter_qemu_io
+$QEMU_IO -f qcow2 -C -c "read -P 0xaa 30K 4K" "$TEST_WRAP" | _filter_qemu_io
 
 # And here we should have 4 subclusters allocated right in the middle of the
 # top image. Make sure the whole cluster remains unallocated
-$QEMU_IMG map "$TEST_WRAP" | _filter_qemu_img_map
+$QEMU_IO -f qcow2 -c map "$TEST_WRAP"
 
 _check_test_img
 
index 8f34a30afea63401d265f4109295c956c6ddec8e..86c57b51d303c2da0ba09d6c9cb1e2d6b8fb4228 100644 (file)
@@ -42,17 +42,15 @@ wrote 2048/2048 bytes at offset 28672
 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 wrote 2048/2048 bytes at offset 34816
 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-Offset          Length          File
-0               0x7000          TEST_DIR/t.IMGFMT
-0x7000          0x800           TEST_DIR/t.wrap.IMGFMT
-0x7800          0x1000          TEST_DIR/t.IMGFMT
-0x8800          0x800           TEST_DIR/t.wrap.IMGFMT
-0x9000          0x7000          TEST_DIR/t.IMGFMT
+28 KiB (0x7000) bytes not allocated at offset 0 bytes (0x0)
+2 KiB (0x800) bytes     allocated at offset 28 KiB (0x7000)
+4 KiB (0x1000) bytes not allocated at offset 30 KiB (0x7800)
+2 KiB (0x800) bytes     allocated at offset 34 KiB (0x8800)
+28 KiB (0x7000) bytes not allocated at offset 36 KiB (0x9000)
 read 4096/4096 bytes at offset 30720
 4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-Offset          Length          File
-0               0x7000          TEST_DIR/t.IMGFMT
-0x7000          0x2000          TEST_DIR/t.wrap.IMGFMT
-0x9000          0x7000          TEST_DIR/t.IMGFMT
+28 KiB (0x7000) bytes not allocated at offset 0 bytes (0x0)
+8 KiB (0x2000) bytes     allocated at offset 28 KiB (0x7000)
+28 KiB (0x7000) bytes not allocated at offset 36 KiB (0x9000)
 No errors were found on the image.
 *** done
index 4a9b0267e50fdfedecf7f508c87f71019c95ee95..1fba07f4ed78ec4e19fcd7ccfb44dc4a3933b19d 100644 (file)
@@ -269,6 +269,7 @@ qos_test_ss.add(
   'virtio-iommu-test.c',
   'vmxnet3-test.c',
   'igb-test.c',
+  'ufs-test.c',
 )
 
 if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL')
diff --git a/tests/qtest/ufs-test.c b/tests/qtest/ufs-test.c
new file mode 100644 (file)
index 0000000..ed3dbca
--- /dev/null
@@ -0,0 +1,587 @@
+/*
+ * QTest testcase for UFS
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/units.h"
+#include "libqtest.h"
+#include "libqos/qgraph.h"
+#include "libqos/pci.h"
+#include "scsi/constants.h"
+#include "include/block/ufs.h"
+
+/* Test image size in bytes */
+#define TEST_IMAGE_SIZE (64 * 1024 * 1024)
+/* Timeout for various operations, in seconds. */
+#define TIMEOUT_SECONDS 10
+/* Maximum PRD entry count */
+#define MAX_PRD_ENTRY_COUNT 10
+#define PRD_ENTRY_DATA_SIZE 4096
+/* Constants for building UPIUs */
+#define UTP_COMMAND_DESCRIPTOR_SIZE 4096
+#define UTP_RESPONSE_UPIU_OFFSET 1024
+#define UTP_PRDT_UPIU_OFFSET 2048
+
+typedef struct QUfs QUfs;
+
+struct QUfs {
+    QOSGraphObject obj;
+    QPCIDevice dev;
+    QPCIBar bar;
+
+    uint64_t utrlba;
+    uint64_t utmrlba;
+    uint64_t cmd_desc_addr;
+    uint64_t data_buffer_addr;
+
+    bool enabled;
+};
+
+static inline uint32_t ufs_rreg(QUfs *ufs, size_t offset)
+{
+    return qpci_io_readl(&ufs->dev, ufs->bar, offset);
+}
+
+static inline void ufs_wreg(QUfs *ufs, size_t offset, uint32_t value)
+{
+    qpci_io_writel(&ufs->dev, ufs->bar, offset, value);
+}
+
+static void ufs_wait_for_irq(QUfs *ufs)
+{
+    uint64_t end_time;
+    uint32_t is;
+    /* Poll the interrupt status register, as the Linux driver does. */
+    end_time = g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
+    do {
+        qtest_clock_step(ufs->dev.bus->qts, 100);
+        is = ufs_rreg(ufs, A_IS);
+    } while (is == 0 && g_get_monotonic_time() < end_time);
+}
+
+static UtpTransferReqDesc ufs_build_req_utrd(uint64_t cmd_desc_addr,
+                                             uint8_t slot,
+                                             uint32_t data_direction,
+                                             uint16_t prd_table_length)
+{
+    UtpTransferReqDesc req = { 0 };
+    uint64_t command_desc_base_addr =
+        cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+
+    req.header.dword_0 =
+        cpu_to_le32(1 << 28 | data_direction | UFS_UTP_REQ_DESC_INT_CMD);
+    req.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_COMMAND_STATUS);
+
+    req.command_desc_base_addr_hi = cpu_to_le32(command_desc_base_addr >> 32);
+    req.command_desc_base_addr_lo =
+        cpu_to_le32(command_desc_base_addr & 0xffffffff);
+    req.response_upiu_offset =
+        cpu_to_le16(UTP_RESPONSE_UPIU_OFFSET / sizeof(uint32_t));
+    req.response_upiu_length = cpu_to_le16(sizeof(UtpUpiuRsp));
+    req.prd_table_offset = cpu_to_le16(UTP_PRDT_UPIU_OFFSET / sizeof(uint32_t));
+    req.prd_table_length = cpu_to_le16(prd_table_length);
+    return req;
+}
+
+static void ufs_send_nop_out(QUfs *ufs, uint8_t slot,
+                             UtpTransferReqDesc *utrd_out, UtpUpiuRsp *rsp_out)
+{
+    /* Build up utp transfer request descriptor */
+    UtpTransferReqDesc utrd = ufs_build_req_utrd(ufs->cmd_desc_addr, slot,
+                                                 UFS_UTP_NO_DATA_TRANSFER, 0);
+    uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc);
+    uint64_t req_upiu_addr =
+        ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+    uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET;
+    qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd));
+
+    /* Build up request upiu */
+    UtpUpiuReq req_upiu = { 0 };
+    req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_NOP_OUT;
+    req_upiu.header.task_tag = slot;
+    qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu,
+                   sizeof(req_upiu));
+
+    /* Ring Doorbell */
+    ufs_wreg(ufs, A_UTRLDBR, 1);
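+    /* The controller raises UTRCS in IS on completion; write 1 to clear it. */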
+    ufs_wait_for_irq(ufs);
+    g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
+    ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+
+    qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out));
+    qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out));
+}
+
+static void ufs_send_query(QUfs *ufs, uint8_t slot, uint8_t query_function,
+                           uint8_t query_opcode, uint8_t idn, uint8_t index,
+                           UtpTransferReqDesc *utrd_out, UtpUpiuRsp *rsp_out)
+{
+    /* Build up utp transfer request descriptor */
+    UtpTransferReqDesc utrd = ufs_build_req_utrd(ufs->cmd_desc_addr, slot,
+                                                 UFS_UTP_NO_DATA_TRANSFER, 0);
+    uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc);
+    uint64_t req_upiu_addr =
+        ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+    uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET;
+    qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd));
+
+    /* Build up request upiu */
+    UtpUpiuReq req_upiu = { 0 };
+    req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_QUERY_REQ;
+    req_upiu.header.query_func = query_function;
+    req_upiu.header.task_tag = slot;
+    /*
+     * QEMU UFS does not currently support Write descriptor and Write attribute,
+     * so the value of data_segment_length is always 0.
+     */
+    req_upiu.header.data_segment_length = 0;
+    req_upiu.qr.opcode = query_opcode;
+    req_upiu.qr.idn = idn;
+    req_upiu.qr.index = index;
+    qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu,
+                   sizeof(req_upiu));
+
+    /* Ring Doorbell */
+    ufs_wreg(ufs, A_UTRLDBR, 1);
+    ufs_wait_for_irq(ufs);
+    g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
+    ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+
+    qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out));
+    qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out));
+}
+
+static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun,
+                                  const uint8_t *cdb, const uint8_t *data_in,
+                                  size_t data_in_len, uint8_t *data_out,
+                                  size_t data_out_len,
+                                  UtpTransferReqDesc *utrd_out,
+                                  UtpUpiuRsp *rsp_out)
+{
+    /* Build up PRDT */
+    UfshcdSgEntry entries[MAX_PRD_ENTRY_COUNT] = { 0 };
+    uint8_t flags;
+    uint16_t prd_table_length, i;
+    uint32_t data_direction, data_len;
+    uint64_t req_upiu_addr =
+        ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE;
+    uint64_t prdt_addr = req_upiu_addr + UTP_PRDT_UPIU_OFFSET;
+
+    g_assert_true(data_in_len < MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE);
+    g_assert_true(data_out_len < MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE);
+    if (data_in_len > 0) {
+        g_assert_nonnull(data_in);
+        data_direction = UFS_UTP_HOST_TO_DEVICE;
+        data_len = data_in_len;
+        flags = UFS_UPIU_CMD_FLAGS_WRITE;
+    } else if (data_out_len > 0) {
+        g_assert_nonnull(data_out);
+        data_direction = UFS_UTP_DEVICE_TO_HOST;
+        data_len = data_out_len;
+        flags = UFS_UPIU_CMD_FLAGS_READ;
+    } else {
+        data_direction = UFS_UTP_NO_DATA_TRANSFER;
+        data_len = 0;
+        flags = UFS_UPIU_CMD_FLAGS_NONE;
+    }
+    prd_table_length = DIV_ROUND_UP(data_len, PRD_ENTRY_DATA_SIZE);
+
+    qtest_memset(ufs->dev.bus->qts, ufs->data_buffer_addr, 0,
+                 MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE);
+    if (data_in_len) {
+        qtest_memwrite(ufs->dev.bus->qts, ufs->data_buffer_addr, data_in,
+                       data_in_len);
+    }
+
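+    /* Fill the PRDT; data byte counts are encoded as length minus one. */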
+    for (i = 0; i < prd_table_length; i++) {
+        entries[i].addr =
+            cpu_to_le64(ufs->data_buffer_addr + i * PRD_ENTRY_DATA_SIZE);
+        if (i + 1 != prd_table_length) {
+            entries[i].size = cpu_to_le32(PRD_ENTRY_DATA_SIZE - 1);
+        } else {
+            entries[i].size = cpu_to_le32(
+                data_len - (PRD_ENTRY_DATA_SIZE * (prd_table_length - 1)) - 1);
+        }
+    }
+    qtest_memwrite(ufs->dev.bus->qts, prdt_addr, entries,
+                   prd_table_length * sizeof(UfshcdSgEntry));
+
+    /* Build up utp transfer request descriptor */
+    UtpTransferReqDesc utrd = ufs_build_req_utrd(
+        ufs->cmd_desc_addr, slot, data_direction, prd_table_length);
+    uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc);
+    uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET;
+    qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd));
+
+    /* Build up request upiu */
+    UtpUpiuReq req_upiu = { 0 };
+    req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_COMMAND;
+    req_upiu.header.flags = flags;
+    req_upiu.header.lun = lun;
+    req_upiu.header.task_tag = slot;
+    req_upiu.sc.exp_data_transfer_len = cpu_to_be32(data_len);
+    memcpy(req_upiu.sc.cdb, cdb, UFS_CDB_SIZE);
+    qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu,
+                   sizeof(req_upiu));
+
+    /* Ring Doorbell */
+    ufs_wreg(ufs, A_UTRLDBR, 1);
+    ufs_wait_for_irq(ufs);
+    g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS));
+    ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1));
+
+    qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out));
+    qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out));
+    if (data_out_len) {
+        qtest_memread(ufs->dev.bus->qts, ufs->data_buffer_addr, data_out,
+                      data_out_len);
+    }
+}
+
+/**
+ * Initialize the UFS host controller and logical unit.
+ * After running this function, transfer requests can be issued to the UFS.
+ */
+static void ufs_init(QUfs *ufs, QGuestAllocator *alloc)
+{
+    uint64_t end_time;
+    uint32_t nutrs, nutmrs;
+    uint32_t hcs, is, ucmdarg2, cap;
+    uint32_t hce = 0, ie = 0;
+    UtpTransferReqDesc utrd;
+    UtpUpiuRsp rsp_upiu;
+
+    ufs->bar = qpci_iomap(&ufs->dev, 0, NULL);
+    qpci_device_enable(&ufs->dev);
+
+    /* Start host controller initialization */
+    hce = FIELD_DP32(hce, HCE, HCE, 1);
+    ufs_wreg(ufs, A_HCE, hce);
+
+    /* Wait for the host controller enable to take effect */
+    end_time = g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
+    do {
+        qtest_clock_step(ufs->dev.bus->qts, 100);
+        hce = FIELD_EX32(ufs_rreg(ufs, A_HCE), HCE, HCE);
+    } while (hce == 0 && g_get_monotonic_time() < end_time);
+    g_assert_cmpuint(hce, ==, 1);
+
+    /* Enable interrupt */
+    ie = FIELD_DP32(ie, IE, UCCE, 1);
+    ie = FIELD_DP32(ie, IE, UHESE, 1);
+    ie = FIELD_DP32(ie, IE, UHXSE, 1);
+    ie = FIELD_DP32(ie, IE, UPMSE, 1);
+    ufs_wreg(ufs, A_IE, ie);
+
+    /* Send the DME_LINK_STARTUP UIC command */
+    hcs = ufs_rreg(ufs, A_HCS);
+    g_assert_true(FIELD_EX32(hcs, HCS, UCRDY));
+
+    ufs_wreg(ufs, A_UCMDARG1, 0);
+    ufs_wreg(ufs, A_UCMDARG2, 0);
+    ufs_wreg(ufs, A_UCMDARG3, 0);
+    ufs_wreg(ufs, A_UICCMD, UFS_UIC_CMD_DME_LINK_STARTUP);
+
+    is = ufs_rreg(ufs, A_IS);
+    g_assert_true(FIELD_EX32(is, IS, UCCS));
+    ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UCCS, 1));
+
+    ucmdarg2 = ufs_rreg(ufs, A_UCMDARG2);
+    g_assert_cmpuint(ucmdarg2, ==, 0);
+    is = ufs_rreg(ufs, A_IS);
+    g_assert_cmpuint(is, ==, 0);
+    hcs = ufs_rreg(ufs, A_HCS);
+    g_assert_true(FIELD_EX32(hcs, HCS, DP));
+    g_assert_true(FIELD_EX32(hcs, HCS, UTRLRDY));
+    g_assert_true(FIELD_EX32(hcs, HCS, UTMRLRDY));
+    g_assert_true(FIELD_EX32(hcs, HCS, UCRDY));
+
+    /* Enable all interrupt functions */
+    ie = FIELD_DP32(ie, IE, UTRCE, 1);
+    ie = FIELD_DP32(ie, IE, UEE, 1);
+    ie = FIELD_DP32(ie, IE, UPMSE, 1);
+    ie = FIELD_DP32(ie, IE, UHXSE, 1);
+    ie = FIELD_DP32(ie, IE, UHESE, 1);
+    ie = FIELD_DP32(ie, IE, UTMRCE, 1);
+    ie = FIELD_DP32(ie, IE, UCCE, 1);
+    ie = FIELD_DP32(ie, IE, DFEE, 1);
+    ie = FIELD_DP32(ie, IE, HCFEE, 1);
+    ie = FIELD_DP32(ie, IE, SBFEE, 1);
+    ie = FIELD_DP32(ie, IE, CEFEE, 1);
+    ufs_wreg(ufs, A_IE, ie);
+    ufs_wreg(ufs, A_UTRIACR, 0);
+
+    /* Enable transfer request and task management request lists */
+    cap = ufs_rreg(ufs, A_CAP);
+    nutrs = FIELD_EX32(cap, CAP, NUTRS) + 1;
+    nutmrs = FIELD_EX32(cap, CAP, NUTMRS) + 1;
+    ufs->cmd_desc_addr =
+        guest_alloc(alloc, nutrs * UTP_COMMAND_DESCRIPTOR_SIZE);
+    ufs->data_buffer_addr =
+        guest_alloc(alloc, MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE);
+    ufs->utrlba = guest_alloc(alloc, nutrs * sizeof(UtpTransferReqDesc));
+    ufs->utmrlba = guest_alloc(alloc, nutmrs * sizeof(UtpTaskReqDesc));
+
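+    /* Program the request list base addresses and start both lists. */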
+    ufs_wreg(ufs, A_UTRLBA, ufs->utrlba & 0xffffffff);
+    ufs_wreg(ufs, A_UTRLBAU, ufs->utrlba >> 32);
+    ufs_wreg(ufs, A_UTMRLBA, ufs->utmrlba & 0xffffffff);
+    ufs_wreg(ufs, A_UTMRLBAU, ufs->utmrlba >> 32);
+    ufs_wreg(ufs, A_UTRLRSR, 1);
+    ufs_wreg(ufs, A_UTMRLRSR, 1);
+
+    /* Send nop out to test transfer request */
+    ufs_send_nop_out(ufs, 0, &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+
+    /* Set fDeviceInit flag via query request */
+    ufs_send_query(ufs, 0, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST,
+                   UFS_UPIU_QUERY_OPCODE_SET_FLAG,
+                   UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+
+    /* Poll fDeviceInit until the device clears it on completing init */
+    end_time = g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND;
+    do {
+        qtest_clock_step(ufs->dev.bus->qts, 100);
+        ufs_send_query(ufs, 0, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST,
+                       UFS_UPIU_QUERY_OPCODE_READ_FLAG,
+                       UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, &utrd, &rsp_upiu);
+    } while (be32_to_cpu(rsp_upiu.qr.value) != 0 &&
+             g_get_monotonic_time() < end_time);
+    g_assert_cmpuint(be32_to_cpu(rsp_upiu.qr.value), ==, 0);
+
+    ufs->enabled = true;
+}
+
+static void ufs_exit(QUfs *ufs, QGuestAllocator *alloc)
+{
+    if (ufs->enabled) {
+        guest_free(alloc, ufs->utrlba);
+        guest_free(alloc, ufs->utmrlba);
+        guest_free(alloc, ufs->cmd_desc_addr);
+        guest_free(alloc, ufs->data_buffer_addr);
+    }
+
+    qpci_iounmap(&ufs->dev, ufs->bar);
+}
+
+static void *ufs_get_driver(void *obj, const char *interface)
+{
+    QUfs *ufs = obj;
+
+    if (!g_strcmp0(interface, "pci-device")) {
+        return &ufs->dev;
+    }
+
+    fprintf(stderr, "%s not present in ufs\n", interface);
+    g_assert_not_reached();
+}
+
+static void *ufs_create(void *pci_bus, QGuestAllocator *alloc, void *addr)
+{
+    QUfs *ufs = g_new0(QUfs, 1);
+    QPCIBus *bus = pci_bus;
+
+    qpci_device_init(&ufs->dev, bus, addr);
+    ufs->obj.get_driver = ufs_get_driver;
+
+    return &ufs->obj;
+}
+
+static void ufstest_reg_read(void *obj, void *data, QGuestAllocator *alloc)
+{
+    QUfs *ufs = obj;
+    uint32_t cap;
+
+    ufs->bar = qpci_iomap(&ufs->dev, 0, NULL);
+    qpci_device_enable(&ufs->dev);
+
+    cap = ufs_rreg(ufs, A_CAP);
+    g_assert_cmpuint(FIELD_EX32(cap, CAP, NUTRS), ==, 31);
+    g_assert_cmpuint(FIELD_EX32(cap, CAP, NUTMRS), ==, 7);
+    g_assert_cmpuint(FIELD_EX32(cap, CAP, 64AS), ==, 1);
+
+    qpci_iounmap(&ufs->dev, ufs->bar);
+}
+
+static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc)
+{
+    QUfs *ufs = obj;
+
+    uint8_t buf[4096] = { 0 };
+    const uint8_t report_luns_cdb[UFS_CDB_SIZE] = {
+        /* allocation length 4096 */
+        REPORT_LUNS, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00,        0x00, 0x10, 0x00, 0x00, 0x00
+    };
+    const uint8_t test_unit_ready_cdb[UFS_CDB_SIZE] = {
+        TEST_UNIT_READY,
+    };
+    UtpTransferReqDesc utrd;
+    UtpUpiuRsp rsp_upiu;
+
+    ufs_init(ufs, alloc);
+
+    /* Check REPORT_LUNS */
+    ufs_send_scsi_command(ufs, 0, 0, report_luns_cdb, NULL, 0, buf, sizeof(buf),
+                          &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, GOOD);
+    /* LUN LIST LENGTH should be 8, in big endian */
+    g_assert_cmpuint(buf[3], ==, 8);
+    /* There is one logical unit, whose LUN is 0 */
+    g_assert_cmpuint(buf[9], ==, 0);
+
+    /* Check TEST_UNIT_READY */
+    ufs_send_scsi_command(ufs, 0, 0, test_unit_ready_cdb, NULL, 0, NULL, 0,
+                          &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, GOOD);
+
+    ufs_exit(ufs, alloc);
+}
+
+static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc)
+{
+    QUfs *ufs = obj;
+    uint8_t read_buf[4096] = { 0 };
+    uint8_t write_buf[4096] = { 0 };
+    const uint8_t read_capacity_cdb[UFS_CDB_SIZE] = {
+        /* READ CAPACITY(16), allocation length 4096 */
+        SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16,
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x10, 0x00, 0x00, 0x00
+    };
+    const uint8_t read_cdb[UFS_CDB_SIZE] = {
+        /* READ(10) to LBA 0, transfer length 1 */
+        READ_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00
+    };
+    const uint8_t write_cdb[UFS_CDB_SIZE] = {
+        /* WRITE(10) to LBA 0, transfer length 1 */
+        WRITE_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00
+    };
+    uint32_t block_size;
+    UtpTransferReqDesc utrd;
+    UtpUpiuRsp rsp_upiu;
+
+    ufs_init(ufs, alloc);
+
+    /* Read capacity */
+    ufs_send_scsi_command(ufs, 0, 1, read_capacity_cdb, NULL, 0, read_buf,
+                          sizeof(read_buf), &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
+                     UFS_COMMAND_RESULT_SUCESS);
+    block_size = ldl_be_p(&read_buf[8]);
+    g_assert_cmpuint(block_size, ==, 4096);
+
+    /* Write data */
+    memset(write_buf, rand() % 255 + 1, block_size);
+    ufs_send_scsi_command(ufs, 0, 1, write_cdb, write_buf, block_size, NULL, 0,
+                          &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
+                     UFS_COMMAND_RESULT_SUCESS);
+
+    /* Read data and verify */
+    ufs_send_scsi_command(ufs, 0, 1, read_cdb, NULL, 0, read_buf, block_size,
+                          &utrd, &rsp_upiu);
+    g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS);
+    g_assert_cmpuint(rsp_upiu.header.scsi_status, ==,
+                     UFS_COMMAND_RESULT_SUCESS);
+    g_assert_cmpint(memcmp(read_buf, write_buf, block_size), ==, 0);
+
+    ufs_exit(ufs, alloc);
+}
+
+static void drive_destroy(void *path)
+{
+    unlink(path);
+    g_free(path);
+    qos_invalidate_command_line();
+}
+
+static char *drive_create(void)
+{
+    int fd, ret;
+    char *t_path;
+
+    /* Create a temporary raw image */
+    fd = g_file_open_tmp("qtest-ufs.XXXXXX", &t_path, NULL);
+    g_assert_cmpint(fd, >=, 0);
+    ret = ftruncate(fd, TEST_IMAGE_SIZE);
+    g_assert_cmpint(ret, ==, 0);
+    close(fd);
+
+    g_test_queue_destroy(drive_destroy, t_path);
+    return t_path;
+}
+
+static void *ufs_blk_test_setup(GString *cmd_line, void *arg)
+{
+    char *tmp_path = drive_create();
+
+    g_string_append_printf(cmd_line,
+                           " -blockdev file,filename=%s,node-name=drv1 "
+                           "-device ufs-lu,bus=ufs0,drive=drv1,lun=1 ",
+                           tmp_path);
+
+    return arg;
+}
+
+static void ufs_register_nodes(void)
+{
+    const char *arch;
+    QOSGraphEdgeOptions edge_opts = {
+        .before_cmd_line = "-blockdev null-co,node-name=drv0,read-zeroes=on",
+        .after_cmd_line = "-device ufs-lu,bus=ufs0,drive=drv0,lun=0",
+        .extra_device_opts = "addr=04.0,id=ufs0,nutrs=32,nutmrs=8"
+    };
+
+    QOSGraphTestOptions io_test_opts = {
+        .before = ufs_blk_test_setup,
+    };
+
+    add_qpci_address(&edge_opts, &(QPCIAddress){ .devfn = QPCI_DEVFN(4, 0) });
+
+    qos_node_create_driver("ufs", ufs_create);
+    qos_node_consumes("ufs", "pci-bus", &edge_opts);
+    qos_node_produces("ufs", "pci-device");
+
+    qos_add_test("reg-read", "ufs", ufstest_reg_read, NULL);
+
+    /*
+     * Check architecture
+     * TODO: Enable ufs io tests for ppc64
+     */
+    arch = qtest_get_arch();
+    if (!strcmp(arch, "ppc64")) {
+        g_test_message("Skipping ufs io tests for ppc64");
+        return;
+    }
+    qos_add_test("init", "ufs", ufstest_init, NULL);
+    qos_add_test("read-write", "ufs", ufstest_read_write, &io_test_opts);
+}
+
+libqos_init(ufs_register_nodes);
index 866fb577f30e9e3d6d91d72f493fcedcbed519be..7e73948f5e3de984d8d5d83066b97b3bdf3a8b24 100644 (file)
@@ -571,7 +571,7 @@ static int sortelem_cmp_src_index(const void *a, const void *b)
  */
 void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf)
 {
-    IOVectorSortElem sortelems[src->niov];
+    g_autofree IOVectorSortElem *sortelems = g_new(IOVectorSortElem, src->niov);
     void *last_end;
     int i;
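The pattern in isolation: QEMU avoids variable-length arrays because the stack is bounded while src->niov is caller-controlled, and g_autofree moves the allocation to the heap with the same automatic-cleanup ergonomics (a sketch):

    /* Before: stack usage scales with niov */
    IOVectorSortElem sortelems[src->niov];

    /* After: heap allocation, released automatically at scope exit */
    g_autofree IOVectorSortElem *sortelems =
        g_new(IOVectorSortElem, src->niov);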
 
index cd17fb53265b573dffe406d0b637ff552edf73fc..b4b6bf30a21162130778cc32652a699dbe891b74 100644 (file)
@@ -127,7 +127,14 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg)
         if (rc < 0) {
             if (rc == QIO_CHANNEL_ERR_BLOCK) {
                 assert(local_err == NULL);
-                qio_channel_yield(ioc, G_IO_IN);
+                if (server->ctx) {
+                    server->in_qio_channel_yield = true;
+                    qio_channel_yield(ioc, G_IO_IN);
+                    server->in_qio_channel_yield = false;
+                } else {
+                    /* Wait until attached to an AioContext again */
+                    qemu_coroutine_yield();
+                }
                 continue;
             } else {
                 error_report_err(local_err);
@@ -278,7 +285,7 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
         vu_fd_watch->fd = fd;
         vu_fd_watch->cb = cb;
         qemu_socket_set_nonblock(fd);
-        aio_set_fd_handler(server->ioc->ctx, fd, kick_handler,
+        aio_set_fd_handler(server->ctx, fd, kick_handler,
                            NULL, NULL, NULL, vu_fd_watch);
         vu_fd_watch->vu_dev = vu_dev;
         vu_fd_watch->pvt = pvt;
@@ -299,7 +306,7 @@ static void remove_watch(VuDev *vu_dev, int fd)
     if (!vu_fd_watch) {
         return;
     }
-    aio_set_fd_handler(server->ioc->ctx, fd, NULL, NULL, NULL, NULL, NULL);
+    aio_set_fd_handler(server->ctx, fd, NULL, NULL, NULL, NULL, NULL);
 
     QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
     g_free(vu_fd_watch);
@@ -344,6 +351,8 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc,
     /* TODO vu_message_write() spins if non-blocking! */
     qio_channel_set_blocking(server->ioc, false, NULL);
 
+    qio_channel_set_follow_coroutine_ctx(server->ioc, true);
+
     server->co_trip = qemu_coroutine_create(vu_client_trip, server);
 
     aio_context_acquire(server->ctx);
@@ -399,13 +408,12 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
         return;
     }
 
-    qio_channel_attach_aio_context(server->ioc, ctx);
-
     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
         aio_set_fd_handler(ctx, vu_fd_watch->fd, kick_handler, NULL,
                            NULL, NULL, vu_fd_watch);
     }
 
+    assert(!server->in_qio_channel_yield);
     aio_co_schedule(ctx, server->co_trip);
 }
 
@@ -419,11 +427,16 @@ void vhost_user_server_detach_aio_context(VuServer *server)
             aio_set_fd_handler(server->ctx, vu_fd_watch->fd,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }
-
-        qio_channel_detach_aio_context(server->ioc);
     }
 
     server->ctx = NULL;
+
+    if (server->ioc) {
+        if (server->in_qio_channel_yield) {
+            /* Stop receiving the next vhost-user message */
+            qio_channel_wake_read(server->ioc);
+        }
+    }
 }
 
 bool vhost_user_server_start(VuServer *server,
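The detach path pairs with the read loop above; a condensed sketch of the handshake between the reader coroutine and the context switch (names as in util/vhost-user-server.c):

    /* Reader coroutine (vu_message_read): */
    server->in_qio_channel_yield = true;
    qio_channel_yield(ioc, G_IO_IN);       /* parks in the current AioContext */
    server->in_qio_channel_yield = false;

    /* Detach (vhost_user_server_detach_aio_context): */
    if (server->in_qio_channel_yield) {
        /* qio_channel_wake_read() interrupts the parked coroutine */
        qio_channel_wake_read(server->ioc);
    }

Without an attached AioContext, the reader falls back to a bare qemu_coroutine_yield() and is rescheduled by vhost_user_server_attach_aio_context() via aio_co_schedule().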