author    Stefan Hajnoczi <stefanha@redhat.com>    2023-09-08 07:09:04 -0400
committer Stefan Hajnoczi <stefanha@redhat.com>    2023-09-08 07:09:04 -0400
commit    2f352bc2270fdc896beb2957b5171b03ba3e2d66 (patch)
tree      b0fbcd22c1c1470468b0aff5fdecc441bf0d6f52 /hw
parent    03a3a62fbd0aa5227e978eef3c67d3978aec9e5f (diff)
parent    631c872614aca91eaf947c1748f0f27f99635d92 (diff)
download  qemu-2f352bc2270fdc896beb2957b5171b03ba3e2d66.zip
          qemu-2f352bc2270fdc896beb2957b5171b03ba3e2d66.tar.gz
          qemu-2f352bc2270fdc896beb2957b5171b03ba3e2d66.tar.bz2
Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging
Pull request

- Jeuk Kim's emulated UFS device
- Fabiano Rosas' IOThread GSource "name" debugging aid

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmT6E3YACgkQnKSrs4Gr
# c8g3NwgAxVSzxS6hGCs6hKjRJ6f8HIlBWWbeO273XWbuAsMUbbYYAw7Gx6bzrbnb
# NFdFZ4F3M0lOfIaGFONasA4Wo2rIuiWVfj6Es4pcYjnexF0OmhAib2RLLEtuDlyc
# EJB/s4Mo2Xk+aw8LzQoYJVvDYi+hTZMmaHaQmNAsYLyQmsN/zg5iEE/vMDqVCNOs
# trv9rMGOJBblzJ4bqgItJ80nSVaEbwErpiU1+NTY77dpTeIt/3nq7yGAo+h4rVpc
# Tv4IuH8y6Rb9xjlwSANL2YKS2VOuJc5J/G5vpM63T4O2yzYSosTsU+ET/ZqB2U1H
# v0uT9K2akfsTOFR8rUaroFsXha8IMQ==
# =6SqP
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 07 Sep 2023 14:16:22 EDT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [ultimate]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [ultimate]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  tests/qtest: Introduce tests for UFS
  hw/ufs: Support for UFS logical unit
  hw/ufs: Support for Query Transfer Requests
  hw/ufs: Initial commit for emulated Universal-Flash-Storage
  iothread: Set the GSource "name" field

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
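For orientation, a minimal sketch of an invocation that exercises the new device, assembled from the usage comment in hw/ufs/ufs.c below; the machine type, image path, serial string, and IDs are placeholder assumptions, not part of the commit:

    qemu-system-x86_64 -M q35 \
        -drive file=ufs.img,if=none,id=ufsdrive0 \
        -device ufs,serial=deadbeef,id=ufs0 \
        -device ufs-lu,drive=ufsdrive0,bus=ufs0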
Diffstat (limited to 'hw')
-rw-r--r--  hw/Kconfig             1
-rw-r--r--  hw/meson.build         1
-rw-r--r--  hw/ufs/Kconfig         4
-rw-r--r--  hw/ufs/lu.c         1445
-rw-r--r--  hw/ufs/meson.build     1
-rw-r--r--  hw/ufs/trace-events   58
-rw-r--r--  hw/ufs/trace.h         1
-rw-r--r--  hw/ufs/ufs.c        1502
-rw-r--r--  hw/ufs/ufs.h         131
9 files changed, 3144 insertions, 0 deletions
diff --git a/hw/Kconfig b/hw/Kconfig
index ba62ff6..9ca7b38 100644
--- a/hw/Kconfig
+++ b/hw/Kconfig
@@ -38,6 +38,7 @@ source smbios/Kconfig
source ssi/Kconfig
source timer/Kconfig
source tpm/Kconfig
+source ufs/Kconfig
source usb/Kconfig
source virtio/Kconfig
source vfio/Kconfig
diff --git a/hw/meson.build b/hw/meson.build
index c7ac7d3..f01fac4 100644
--- a/hw/meson.build
+++ b/hw/meson.build
@@ -37,6 +37,7 @@ subdir('smbios')
subdir('ssi')
subdir('timer')
subdir('tpm')
+subdir('ufs')
subdir('usb')
subdir('vfio')
subdir('virtio')
diff --git a/hw/ufs/Kconfig b/hw/ufs/Kconfig
new file mode 100644
index 0000000..b7b3392
--- /dev/null
+++ b/hw/ufs/Kconfig
@@ -0,0 +1,4 @@
+config UFS_PCI
+ bool
+ default y if PCI_DEVICES
+ depends on PCI
diff --git a/hw/ufs/lu.c b/hw/ufs/lu.c
new file mode 100644
index 0000000..e1c46bd
--- /dev/null
+++ b/hw/ufs/lu.c
@@ -0,0 +1,1445 @@
+/*
+ * QEMU UFS Logical Unit
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Written by Jeuk Kim <jeuk20.kim@samsung.com>
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "qemu/memalign.h"
+#include "hw/scsi/scsi.h"
+#include "scsi/constants.h"
+#include "sysemu/block-backend.h"
+#include "qemu/cutils.h"
+#include "trace.h"
+#include "ufs.h"
+
+/*
+ * The code below handling SCSI commands is copied from hw/scsi/scsi-disk.c,
+ * with minor adjustments to make it work for UFS.
+ */
+
+#define SCSI_DMA_BUF_SIZE (128 * KiB)
+#define SCSI_MAX_INQUIRY_LEN 256
+#define SCSI_INQUIRY_DATA_SIZE 36
+#define SCSI_MAX_MODE_LEN 256
+
+typedef struct UfsSCSIReq {
+ SCSIRequest req;
+ /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
+ uint64_t sector;
+ uint32_t sector_count;
+ uint32_t buflen;
+ bool started;
+ bool need_fua_emulation;
+ struct iovec iov;
+ QEMUIOVector qiov;
+ BlockAcctCookie acct;
+} UfsSCSIReq;
+
+static void ufs_scsi_free_request(SCSIRequest *req)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+
+ qemu_vfree(r->iov.iov_base);
+}
+
+static void scsi_check_condition(UfsSCSIReq *r, SCSISense sense)
+{
+ trace_ufs_scsi_check_condition(r->req.tag, sense.key, sense.asc,
+ sense.ascq);
+ scsi_req_build_sense(&r->req, sense);
+ scsi_req_complete(&r->req, CHECK_CONDITION);
+}
+
+static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf,
+ uint32_t outbuf_len)
+{
+ UfsHc *u = UFS(req->bus->qbus.parent);
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
+ uint8_t page_code = req->cmd.buf[2];
+ int start, buflen = 0;
+
+ if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
+ return -1;
+ }
+
+ outbuf[buflen++] = lu->qdev.type & 0x1f;
+ outbuf[buflen++] = page_code;
+ outbuf[buflen++] = 0x00;
+ outbuf[buflen++] = 0x00;
+ start = buflen;
+
+ switch (page_code) {
+ case 0x00: /* Supported page codes, mandatory */
+ {
+ trace_ufs_scsi_emulate_vpd_page_00(req->cmd.xfer);
+ outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
+ if (u->params.serial) {
+ outbuf[buflen++] = 0x80; /* unit serial number */
+ }
+ outbuf[buflen++] = 0x87; /* mode page policy */
+ break;
+ }
+ case 0x80: /* Device serial number, optional */
+ {
+ int l;
+
+ if (!u->params.serial) {
+ trace_ufs_scsi_emulate_vpd_page_80_not_supported();
+ return -1;
+ }
+
+ l = strlen(u->params.serial);
+ if (l > SCSI_INQUIRY_DATA_SIZE) {
+ l = SCSI_INQUIRY_DATA_SIZE;
+ }
+
+ trace_ufs_scsi_emulate_vpd_page_80(req->cmd.xfer);
+ memcpy(outbuf + buflen, u->params.serial, l);
+ buflen += l;
+ break;
+ }
+ case 0x87: /* Mode Page Policy, mandatory */
+ {
+ trace_ufs_scsi_emulate_vpd_page_87(req->cmd.xfer);
+ outbuf[buflen++] = 0x3f; /* apply to all mode pages and subpages */
+ outbuf[buflen++] = 0xff;
+ outbuf[buflen++] = 0; /* shared */
+ outbuf[buflen++] = 0;
+ break;
+ }
+ default:
+ return -1;
+ }
+ /* done with EVPD */
+ assert(buflen - start <= 255);
+ outbuf[start - 1] = buflen - start;
+ return buflen;
+}
+
+static int ufs_scsi_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf,
+ uint32_t outbuf_len)
+{
+ int buflen = 0;
+
+ if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
+ return -1;
+ }
+
+ if (req->cmd.buf[1] & 0x1) {
+ /* Vital product data */
+ return ufs_scsi_emulate_vpd_page(req, outbuf, outbuf_len);
+ }
+
+ /* Standard INQUIRY data */
+ if (req->cmd.buf[2] != 0) {
+ return -1;
+ }
+
+ /* PAGE CODE == 0 */
+ buflen = req->cmd.xfer;
+ if (buflen > SCSI_MAX_INQUIRY_LEN) {
+ buflen = SCSI_MAX_INQUIRY_LEN;
+ }
+
+ if (is_wlun(req->lun)) {
+ outbuf[0] = TYPE_WLUN;
+ } else {
+ outbuf[0] = 0;
+ }
+ outbuf[1] = 0;
+
+ strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' ');
+ strpadcpy((char *)&outbuf[8], 8, "QEMU", ' ');
+
+ memset(&outbuf[32], 0, 4);
+
+ outbuf[2] = 0x06; /* SPC-4 */
+ outbuf[3] = 0x2;
+
+ if (buflen > SCSI_INQUIRY_DATA_SIZE) {
+ outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
+ } else {
+ /*
+ * If the allocation length of CDB is too small, the additional
+ * length is not adjusted
+ */
+ outbuf[4] = SCSI_INQUIRY_DATA_SIZE - 5;
+ }
+
+ /* Support TCQ. */
+ outbuf[7] = req->bus->info->tcq ? 0x02 : 0;
+ return buflen;
+}
+
+static int mode_sense_page(UfsLu *lu, int page, uint8_t **p_outbuf,
+ int page_control)
+{
+ static const int mode_sense_valid[0x3f] = {
+ [MODE_PAGE_CACHING] = 1,
+ [MODE_PAGE_R_W_ERROR] = 1,
+ [MODE_PAGE_CONTROL] = 1,
+ };
+
+ uint8_t *p = *p_outbuf + 2;
+ int length;
+
+ assert(page < ARRAY_SIZE(mode_sense_valid));
+ if ((mode_sense_valid[page]) == 0) {
+ return -1;
+ }
+
+ /*
+ * If Changeable Values are requested, a mask denoting those mode parameters
+ * that are changeable shall be returned. As we currently don't support
+ * parameter changes via MODE_SELECT all bits are returned set to zero.
+ * The buffer was already memset to zero by the caller of this function.
+ */
+ switch (page) {
+ case MODE_PAGE_CACHING:
+ length = 0x12;
+ if (page_control == 1 || /* Changeable Values */
+ blk_enable_write_cache(lu->qdev.conf.blk)) {
+ p[0] = 4; /* WCE */
+ }
+ break;
+
+ case MODE_PAGE_R_W_ERROR:
+ length = 10;
+ if (page_control == 1) { /* Changeable Values */
+ break;
+ }
+ p[0] = 0x80; /* Automatic Write Reallocation Enabled */
+ break;
+
+ case MODE_PAGE_CONTROL:
+ length = 10;
+ if (page_control == 1) { /* Changeable Values */
+ break;
+ }
+ p[1] = 0x10; /* Queue Algorithm modifier */
+ p[8] = 0xff; /* Busy Timeout Period */
+ p[9] = 0xff;
+ break;
+
+ default:
+ return -1;
+ }
+
+ assert(length < 256);
+ (*p_outbuf)[0] = page;
+ (*p_outbuf)[1] = length;
+ *p_outbuf += length + 2;
+ return length + 2;
+}
+
+static int ufs_scsi_emulate_mode_sense(UfsSCSIReq *r, uint8_t *outbuf)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+ bool dbd;
+ int page, buflen, ret, page_control;
+ uint8_t *p;
+ uint8_t dev_specific_param = 0;
+
+ dbd = (r->req.cmd.buf[1] & 0x8) != 0;
+ if (!dbd) {
+ return -1;
+ }
+
+ page = r->req.cmd.buf[2] & 0x3f;
+ page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
+
+ trace_ufs_scsi_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
+ 10,
+ page, r->req.cmd.xfer, page_control);
+ memset(outbuf, 0, r->req.cmd.xfer);
+ p = outbuf;
+
+ if (!blk_is_writable(lu->qdev.conf.blk)) {
+ dev_specific_param |= 0x80; /* Readonly. */
+ }
+
+ p[2] = 0; /* Medium type. */
+ p[3] = dev_specific_param;
+ p[6] = p[7] = 0; /* Block descriptor length. */
+ p += 8;
+
+ if (page_control == 3) {
+ /* Saved Values */
+ scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
+ return -1;
+ }
+
+ if (page == 0x3f) {
+ for (page = 0; page <= 0x3e; page++) {
+ mode_sense_page(lu, page, &p, page_control);
+ }
+ } else {
+ ret = mode_sense_page(lu, page, &p, page_control);
+ if (ret == -1) {
+ return -1;
+ }
+ }
+
+ buflen = p - outbuf;
+ /*
+ * The mode data length field specifies the length in bytes of the
+ * following data that is available to be transferred. The mode data
+ * length does not include itself.
+ */
+ outbuf[0] = ((buflen - 2) >> 8) & 0xff;
+ outbuf[1] = (buflen - 2) & 0xff;
+ return buflen;
+}
+
+/*
+ * scsi_handle_rw_error has two return values. False means that the error
+ * must be ignored, true means that the error has been processed and the
+ * caller should not do anything else for this request. Note that
+ * scsi_handle_rw_error always manages its reference counts, independent
+ * of the return value.
+ */
+static bool scsi_handle_rw_error(UfsSCSIReq *r, int ret, bool acct_failed)
+{
+ bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+ SCSISense sense = SENSE_CODE(NO_SENSE);
+ int error = 0;
+ bool req_has_sense = false;
+ BlockErrorAction action;
+ int status;
+
+ if (ret < 0) {
+ status = scsi_sense_from_errno(-ret, &sense);
+ error = -ret;
+ } else {
+ /* A passthrough command has completed with nonzero status. */
+ status = ret;
+ if (status == CHECK_CONDITION) {
+ req_has_sense = true;
+ error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
+ } else {
+ error = EINVAL;
+ }
+ }
+
+ /*
+ * Check whether the error has to be handled by the guest or should
+ * rather follow the rerror=/werror= settings. Guest-handled errors
+ * are usually retried immediately, so do not post them to QMP and
+ * do not account them as failed I/O.
+ */
+ if (req_has_sense && scsi_sense_buf_is_guest_recoverable(
+ r->req.sense, sizeof(r->req.sense))) {
+ action = BLOCK_ERROR_ACTION_REPORT;
+ acct_failed = false;
+ } else {
+ action = blk_get_error_action(lu->qdev.conf.blk, is_read, error);
+ blk_error_action(lu->qdev.conf.blk, action, is_read, error);
+ }
+
+ switch (action) {
+ case BLOCK_ERROR_ACTION_REPORT:
+ if (acct_failed) {
+ block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ }
+ if (!req_has_sense && status == CHECK_CONDITION) {
+ scsi_req_build_sense(&r->req, sense);
+ }
+ scsi_req_complete(&r->req, status);
+ return true;
+
+ case BLOCK_ERROR_ACTION_IGNORE:
+ return false;
+
+ case BLOCK_ERROR_ACTION_STOP:
+ scsi_req_retry(&r->req);
+ return true;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool ufs_scsi_req_check_error(UfsSCSIReq *r, int ret, bool acct_failed)
+{
+ if (r->req.io_canceled) {
+ scsi_req_cancel_complete(&r->req);
+ return true;
+ }
+
+ if (ret < 0) {
+ return scsi_handle_rw_error(r, ret, acct_failed);
+ }
+
+ return false;
+}
+
+static void scsi_aio_complete(void *opaque, int ret)
+{
+ UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+ if (ufs_scsi_req_check_error(r, ret, true)) {
+ goto done;
+ }
+
+ block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ scsi_req_complete(&r->req, GOOD);
+
+done:
+ aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+ scsi_req_unref(&r->req);
+}
+
+static int32_t ufs_scsi_emulate_command(SCSIRequest *req, uint8_t *buf)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
+ uint32_t last_block = 0;
+ uint8_t *outbuf;
+ int buflen;
+
+ switch (req->cmd.buf[0]) {
+ case INQUIRY:
+ case MODE_SENSE_10:
+ case START_STOP:
+ case REQUEST_SENSE:
+ break;
+
+ default:
+ if (!blk_is_available(lu->qdev.conf.blk)) {
+ scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
+ return 0;
+ }
+ break;
+ }
+
+ /*
+ * FIXME: we shouldn't return anything bigger than 4k, but the code
+ * requires the buffer to be as big as req->cmd.xfer in several
+ * places. So, do not allow CDBs with a very large ALLOCATION
+ * LENGTH. The real fix would be to modify scsi_read_data and
+ * dma_buf_read, so that they return data beyond the buflen
+ * as all zeros.
+ */
+ if (req->cmd.xfer > 65536) {
+ goto illegal_request;
+ }
+ r->buflen = MAX(4096, req->cmd.xfer);
+
+ if (!r->iov.iov_base) {
+ r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
+ }
+
+ outbuf = r->iov.iov_base;
+ memset(outbuf, 0, r->buflen);
+ switch (req->cmd.buf[0]) {
+ case TEST_UNIT_READY:
+ assert(blk_is_available(lu->qdev.conf.blk));
+ break;
+ case INQUIRY:
+ buflen = ufs_scsi_emulate_inquiry(req, outbuf, r->buflen);
+ if (buflen < 0) {
+ goto illegal_request;
+ }
+ break;
+ case MODE_SENSE_10:
+ buflen = ufs_scsi_emulate_mode_sense(r, outbuf);
+ if (buflen < 0) {
+ goto illegal_request;
+ }
+ break;
+ case READ_CAPACITY_10:
+ /* The normal LEN field for this command is zero. */
+ memset(outbuf, 0, 8);
+ if (lu->qdev.max_lba > 0) {
+ last_block = lu->qdev.max_lba - 1;
+ }
+ outbuf[0] = (last_block >> 24) & 0xff;
+ outbuf[1] = (last_block >> 16) & 0xff;
+ outbuf[2] = (last_block >> 8) & 0xff;
+ outbuf[3] = last_block & 0xff;
+ outbuf[4] = (lu->qdev.blocksize >> 24) & 0xff;
+ outbuf[5] = (lu->qdev.blocksize >> 16) & 0xff;
+ outbuf[6] = (lu->qdev.blocksize >> 8) & 0xff;
+ outbuf[7] = lu->qdev.blocksize & 0xff;
+ break;
+ case REQUEST_SENSE:
+ /* Just return "NO SENSE". */
+ buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
+ (req->cmd.buf[1] & 1) == 0);
+ if (buflen < 0) {
+ goto illegal_request;
+ }
+ break;
+ case SYNCHRONIZE_CACHE:
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+ block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
+ return 0;
+ case VERIFY_10:
+ trace_ufs_scsi_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
+ if (req->cmd.buf[1] & 6) {
+ goto illegal_request;
+ }
+ break;
+ case SERVICE_ACTION_IN_16:
+ /* Service Action In subcommands. */
+ if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
+ trace_ufs_scsi_emulate_command_SAI_16();
+ memset(outbuf, 0, req->cmd.xfer);
+
+ if (lu->qdev.max_lba > 0) {
+ last_block = lu->qdev.max_lba - 1;
+ }
+ outbuf[0] = 0;
+ outbuf[1] = 0;
+ outbuf[2] = 0;
+ outbuf[3] = 0;
+ outbuf[4] = (last_block >> 24) & 0xff;
+ outbuf[5] = (last_block >> 16) & 0xff;
+ outbuf[6] = (last_block >> 8) & 0xff;
+ outbuf[7] = last_block & 0xff;
+ outbuf[8] = (lu->qdev.blocksize >> 24) & 0xff;
+ outbuf[9] = (lu->qdev.blocksize >> 16) & 0xff;
+ outbuf[10] = (lu->qdev.blocksize >> 8) & 0xff;
+ outbuf[11] = lu->qdev.blocksize & 0xff;
+ outbuf[12] = 0;
+ outbuf[13] = get_physical_block_exp(&lu->qdev.conf);
+
+ if (lu->unit_desc.provisioning_type == 2 ||
+ lu->unit_desc.provisioning_type == 3) {
+ outbuf[14] = 0x80;
+ }
+ /* Protection, exponent and lowest lba field left blank. */
+ break;
+ }
+ trace_ufs_scsi_emulate_command_SAI_unsupported();
+ goto illegal_request;
+ case MODE_SELECT_10:
+ trace_ufs_scsi_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
+ break;
+ case START_STOP:
+ /*
+ * TODO: START_STOP is not yet implemented. It always returns success.
+ * Revisit it when ufs power management is implemented.
+ */
+ trace_ufs_scsi_emulate_command_START_STOP();
+ break;
+ case FORMAT_UNIT:
+ trace_ufs_scsi_emulate_command_FORMAT_UNIT();
+ break;
+ case SEND_DIAGNOSTIC:
+ trace_ufs_scsi_emulate_command_SEND_DIAGNOSTIC();
+ break;
+ default:
+ trace_ufs_scsi_emulate_command_UNKNOWN(buf[0],
+ scsi_command_name(buf[0]));
+ scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
+ return 0;
+ }
+ assert(!r->req.aiocb);
+ r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
+ if (r->iov.iov_len == 0) {
+ scsi_req_complete(&r->req, GOOD);
+ }
+ if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+ assert(r->iov.iov_len == req->cmd.xfer);
+ return -r->iov.iov_len;
+ } else {
+ return r->iov.iov_len;
+ }
+
+illegal_request:
+ if (r->req.status == -1) {
+ scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+ }
+ return 0;
+}
+
+static void ufs_scsi_emulate_read_data(SCSIRequest *req)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+ int buflen = r->iov.iov_len;
+
+ if (buflen) {
+ trace_ufs_scsi_emulate_read_data(buflen);
+ r->iov.iov_len = 0;
+ r->started = true;
+ scsi_req_data(&r->req, buflen);
+ return;
+ }
+
+ /* This also clears the sense buffer for REQUEST SENSE. */
+ scsi_req_complete(&r->req, GOOD);
+}
+
+static int ufs_scsi_check_mode_select(UfsLu *lu, int page, uint8_t *inbuf,
+ int inlen)
+{
+ uint8_t mode_current[SCSI_MAX_MODE_LEN];
+ uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
+ uint8_t *p;
+ int len, expected_len, changeable_len, i;
+
+ /*
+ * The input buffer does not include the page header, so it is
+ * off by 2 bytes.
+ */
+ expected_len = inlen + 2;
+ if (expected_len > SCSI_MAX_MODE_LEN) {
+ return -1;
+ }
+
+ /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
+ if (page == MODE_PAGE_ALLS) {
+ return -1;
+ }
+
+ p = mode_current;
+ memset(mode_current, 0, inlen + 2);
+ len = mode_sense_page(lu, page, &p, 0);
+ if (len < 0 || len != expected_len) {
+ return -1;
+ }
+
+ p = mode_changeable;
+ memset(mode_changeable, 0, inlen + 2);
+ changeable_len = mode_sense_page(lu, page, &p, 1);
+ assert(changeable_len == len);
+
+ /*
+ * Check that unchangeable bits are the same as what MODE SENSE
+ * would return.
+ */
+ for (i = 2; i < len; i++) {
+ if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void ufs_scsi_apply_mode_select(UfsLu *lu, int page, uint8_t *p)
+{
+ switch (page) {
+ case MODE_PAGE_CACHING:
+ blk_set_enable_write_cache(lu->qdev.conf.blk, (p[0] & 4) != 0);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int mode_select_pages(UfsSCSIReq *r, uint8_t *p, int len, bool change)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ while (len > 0) {
+ int page, page_len;
+
+ page = p[0] & 0x3f;
+ if (p[0] & 0x40) {
+ goto invalid_param;
+ } else {
+ if (len < 2) {
+ goto invalid_param_len;
+ }
+ page_len = p[1];
+ p += 2;
+ len -= 2;
+ }
+
+ if (page_len > len) {
+ goto invalid_param_len;
+ }
+
+ if (!change) {
+ if (ufs_scsi_check_mode_select(lu, page, p, page_len) < 0) {
+ goto invalid_param;
+ }
+ } else {
+ ufs_scsi_apply_mode_select(lu, page, p);
+ }
+
+ p += page_len;
+ len -= page_len;
+ }
+ return 0;
+
+invalid_param:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
+ return -1;
+
+invalid_param_len:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
+ return -1;
+}
+
+static void ufs_scsi_emulate_mode_select(UfsSCSIReq *r, uint8_t *inbuf)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+ uint8_t *p = inbuf;
+ int len = r->req.cmd.xfer;
+ int hdr_len = 8;
+ int bd_len;
+ int pass;
+
+ /* We only support PF=1, SP=0. */
+ if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
+ goto invalid_field;
+ }
+
+ if (len < hdr_len) {
+ goto invalid_param_len;
+ }
+
+ bd_len = lduw_be_p(&p[6]);
+ if (bd_len != 0) {
+ goto invalid_param;
+ }
+
+ len -= hdr_len;
+ p += hdr_len;
+
+ /* Ensure no change is made if there is an error! */
+ for (pass = 0; pass < 2; pass++) {
+ if (mode_select_pages(r, p, len, pass == 1) < 0) {
+ assert(pass == 0);
+ return;
+ }
+ }
+
+ if (!blk_enable_write_cache(lu->qdev.conf.blk)) {
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+ block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
+ return;
+ }
+
+ scsi_req_complete(&r->req, GOOD);
+ return;
+
+invalid_param:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
+ return;
+
+invalid_param_len:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
+ return;
+
+invalid_field:
+ scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+}
+
+/* block_num and nb_blocks expected to be in qdev blocksize */
+static inline bool check_lba_range(UfsLu *lu, uint64_t block_num,
+ uint32_t nb_blocks)
+{
+ /*
+ * The first line tests that no overflow happens when computing the last
+ * block. The second line tests that the last accessed block is in
+ * range.
+ *
+ * Careful, the computations should not underflow for nb_blocks == 0,
+ * and a 0-block read to the first LBA beyond the end of device is
+ * valid.
+ */
+ return (block_num <= block_num + nb_blocks &&
+ block_num + nb_blocks <= lu->qdev.max_lba + 1);
+}
+
+static void ufs_scsi_emulate_write_data(SCSIRequest *req)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+
+ if (r->iov.iov_len) {
+ int buflen = r->iov.iov_len;
+ trace_ufs_scsi_emulate_write_data(buflen);
+ r->iov.iov_len = 0;
+ scsi_req_data(&r->req, buflen);
+ return;
+ }
+
+ switch (req->cmd.buf[0]) {
+ case MODE_SELECT_10:
+ /* This also clears the sense buffer for REQUEST SENSE. */
+ ufs_scsi_emulate_mode_select(r, r->iov.iov_base);
+ break;
+ default:
+ abort();
+ }
+}
+
+/* Return a pointer to the data buffer. */
+static uint8_t *ufs_scsi_get_buf(SCSIRequest *req)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+
+ return (uint8_t *)r->iov.iov_base;
+}
+
+static int32_t ufs_scsi_dma_command(SCSIRequest *req, uint8_t *buf)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
+ uint32_t len;
+ uint8_t command;
+
+ command = buf[0];
+
+ if (!blk_is_available(lu->qdev.conf.blk)) {
+ scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
+ return 0;
+ }
+
+ len = scsi_data_cdb_xfer(r->req.cmd.buf);
+ switch (command) {
+ case READ_6:
+ case READ_10:
+ trace_ufs_scsi_dma_command_READ(r->req.cmd.lba, len);
+ if (r->req.cmd.buf[1] & 0xe0) {
+ goto illegal_request;
+ }
+ if (!check_lba_range(lu, r->req.cmd.lba, len)) {
+ goto illegal_lba;
+ }
+ r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+ r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ trace_ufs_scsi_dma_command_WRITE(r->req.cmd.lba, len);
+ if (!blk_is_writable(lu->qdev.conf.blk)) {
+ scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
+ return 0;
+ }
+ if (r->req.cmd.buf[1] & 0xe0) {
+ goto illegal_request;
+ }
+ if (!check_lba_range(lu, r->req.cmd.lba, len)) {
+ goto illegal_lba;
+ }
+ r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+ r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+ break;
+ default:
+ abort();
+ illegal_request:
+ scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+ return 0;
+ illegal_lba:
+ scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
+ return 0;
+ }
+ r->need_fua_emulation = ((r->req.cmd.buf[1] & 8) != 0);
+ if (r->sector_count == 0) {
+ scsi_req_complete(&r->req, GOOD);
+ }
+ assert(r->iov.iov_len == 0);
+ if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+ return -r->sector_count * BDRV_SECTOR_SIZE;
+ } else {
+ return r->sector_count * BDRV_SECTOR_SIZE;
+ }
+}
+
+static void scsi_write_do_fua(UfsSCSIReq *r)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb == NULL);
+ assert(!r->req.io_canceled);
+
+ if (r->need_fua_emulation) {
+ block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
+ return;
+ }
+
+ scsi_req_complete(&r->req, GOOD);
+ scsi_req_unref(&r->req);
+}
+
+static void scsi_dma_complete_noio(UfsSCSIReq *r, int ret)
+{
+ assert(r->req.aiocb == NULL);
+ if (ufs_scsi_req_check_error(r, ret, false)) {
+ goto done;
+ }
+
+ r->sector += r->sector_count;
+ r->sector_count = 0;
+ if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+ scsi_write_do_fua(r);
+ return;
+ } else {
+ scsi_req_complete(&r->req, GOOD);
+ }
+
+done:
+ scsi_req_unref(&r->req);
+}
+
+static void scsi_dma_complete(void *opaque, int ret)
+{
+ UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+
+ aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ }
+ scsi_dma_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
+ BlockCompletionFunc *cb, void *cb_opaque,
+ void *opaque)
+{
+ UfsSCSIReq *r = opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+ return blk_aio_preadv(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
+}
+
+static void scsi_init_iovec(UfsSCSIReq *r, size_t size)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ if (!r->iov.iov_base) {
+ r->buflen = size;
+ r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
+ }
+ r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
+ qemu_iovec_init_external(&r->qiov, &r->iov, 1);
+}
+
+static void scsi_read_complete_noio(UfsSCSIReq *r, int ret)
+{
+ uint32_t n;
+
+ assert(r->req.aiocb == NULL);
+ if (ufs_scsi_req_check_error(r, ret, false)) {
+ goto done;
+ }
+
+ n = r->qiov.size / BDRV_SECTOR_SIZE;
+ r->sector += n;
+ r->sector_count -= n;
+ scsi_req_data(&r->req, r->qiov.size);
+
+done:
+ scsi_req_unref(&r->req);
+}
+
+static void scsi_read_complete(void *opaque, int ret)
+{
+ UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+ trace_ufs_scsi_read_data_count(r->sector_count);
+ aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ trace_ufs_scsi_read_complete(r->req.tag, r->qiov.size);
+ }
+ scsi_read_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+/* Actually issue a read to the block device. */
+static void scsi_do_read(UfsSCSIReq *r, int ret)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb == NULL);
+ if (ufs_scsi_req_check_error(r, ret, false)) {
+ goto done;
+ }
+
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+
+ if (r->req.sg) {
+ dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
+ r->req.residual -= r->req.sg->size;
+ r->req.aiocb = dma_blk_io(
+ blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
+ r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_readv, r,
+ scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE);
+ } else {
+ scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
+ block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
+ r->qiov.size, BLOCK_ACCT_READ);
+ r->req.aiocb = scsi_dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
+ scsi_read_complete, r, r);
+ }
+
+done:
+ scsi_req_unref(&r->req);
+}
+
+static void scsi_do_read_cb(void *opaque, int ret)
+{
+ UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+
+ aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ }
+ scsi_do_read(opaque, ret);
+ aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+/* Read more data from scsi device into buffer. */
+static void scsi_read_data(SCSIRequest *req)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+ bool first;
+
+ trace_ufs_scsi_read_data_count(r->sector_count);
+ if (r->sector_count == 0) {
+ /* This also clears the sense buffer for REQUEST SENSE. */
+ scsi_req_complete(&r->req, GOOD);
+ return;
+ }
+
+ /* No data transfer may already be in progress */
+ assert(r->req.aiocb == NULL);
+
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+ if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+ trace_ufs_scsi_read_data_invalid();
+ scsi_read_complete_noio(r, -EINVAL);
+ return;
+ }
+
+ if (!blk_is_available(req->dev->conf.blk)) {
+ scsi_read_complete_noio(r, -ENOMEDIUM);
+ return;
+ }
+
+ first = !r->started;
+ r->started = true;
+ if (first && r->need_fua_emulation) {
+ block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_do_read_cb, r);
+ } else {
+ scsi_do_read(r, 0);
+ }
+}
+
+static void scsi_write_complete_noio(UfsSCSIReq *r, int ret)
+{
+ uint32_t n;
+
+ assert(r->req.aiocb == NULL);
+ if (ufs_scsi_req_check_error(r, ret, false)) {
+ goto done;
+ }
+
+ n = r->qiov.size / BDRV_SECTOR_SIZE;
+ r->sector += n;
+ r->sector_count -= n;
+ if (r->sector_count == 0) {
+ scsi_write_do_fua(r);
+ return;
+ } else {
+ scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
+ trace_ufs_scsi_write_complete_noio(r->req.tag, r->qiov.size);
+ scsi_req_data(&r->req, r->qiov.size);
+ }
+
+done:
+ scsi_req_unref(&r->req);
+}
+
+static void scsi_write_complete(void *opaque, int ret)
+{
+ UfsSCSIReq *r = (UfsSCSIReq *)opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+
+ aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
+ }
+ scsi_write_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
+}
+
+static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
+ BlockCompletionFunc *cb, void *cb_opaque,
+ void *opaque)
+{
+ UfsSCSIReq *r = opaque;
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+ return blk_aio_pwritev(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
+}
+
+static void scsi_write_data(SCSIRequest *req)
+{
+ UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
+
+ /* No data transfer may already be in progress */
+ assert(r->req.aiocb == NULL);
+
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+ if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
+ trace_ufs_scsi_write_data_invalid();
+ scsi_write_complete_noio(r, -EINVAL);
+ return;
+ }
+
+ if (!r->req.sg && !r->qiov.size) {
+ /* Called for the first time. Ask the driver to send us more data. */
+ r->started = true;
+ scsi_write_complete_noio(r, 0);
+ return;
+ }
+ if (!blk_is_available(req->dev->conf.blk)) {
+ scsi_write_complete_noio(r, -ENOMEDIUM);
+ return;
+ }
+
+ if (r->req.sg) {
+ dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg,
+ BLOCK_ACCT_WRITE);
+ r->req.residual -= r->req.sg->size;
+ r->req.aiocb = dma_blk_io(
+ blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
+ r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_writev, r,
+ scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE);
+ } else {
+ block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
+ r->qiov.size, BLOCK_ACCT_WRITE);
+ r->req.aiocb = scsi_dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
+ scsi_write_complete, r, r);
+ }
+}
+
+static const SCSIReqOps ufs_scsi_emulate_reqops = {
+ .size = sizeof(UfsSCSIReq),
+ .free_req = ufs_scsi_free_request,
+ .send_command = ufs_scsi_emulate_command,
+ .read_data = ufs_scsi_emulate_read_data,
+ .write_data = ufs_scsi_emulate_write_data,
+ .get_buf = ufs_scsi_get_buf,
+};
+
+static const SCSIReqOps ufs_scsi_dma_reqops = {
+ .size = sizeof(UfsSCSIReq),
+ .free_req = ufs_scsi_free_request,
+ .send_command = ufs_scsi_dma_command,
+ .read_data = scsi_read_data,
+ .write_data = scsi_write_data,
+ .get_buf = ufs_scsi_get_buf,
+};
+
+/*
+ * Following commands are not yet supported
+ * PRE_FETCH(10),
+ * UNMAP,
+ * WRITE_BUFFER, READ_BUFFER,
+ * SECURITY_PROTOCOL_IN, SECURITY_PROTOCOL_OUT
+ */
+static const SCSIReqOps *const ufs_scsi_reqops_dispatch[256] = {
+ [TEST_UNIT_READY] = &ufs_scsi_emulate_reqops,
+ [INQUIRY] = &ufs_scsi_emulate_reqops,
+ [MODE_SENSE_10] = &ufs_scsi_emulate_reqops,
+ [START_STOP] = &ufs_scsi_emulate_reqops,
+ [READ_CAPACITY_10] = &ufs_scsi_emulate_reqops,
+ [REQUEST_SENSE] = &ufs_scsi_emulate_reqops,
+ [SYNCHRONIZE_CACHE] = &ufs_scsi_emulate_reqops,
+ [MODE_SELECT_10] = &ufs_scsi_emulate_reqops,
+ [VERIFY_10] = &ufs_scsi_emulate_reqops,
+ [FORMAT_UNIT] = &ufs_scsi_emulate_reqops,
+ [SERVICE_ACTION_IN_16] = &ufs_scsi_emulate_reqops,
+ [SEND_DIAGNOSTIC] = &ufs_scsi_emulate_reqops,
+
+ [READ_6] = &ufs_scsi_dma_reqops,
+ [READ_10] = &ufs_scsi_dma_reqops,
+ [WRITE_6] = &ufs_scsi_dma_reqops,
+ [WRITE_10] = &ufs_scsi_dma_reqops,
+};
+
+static SCSIRequest *scsi_new_request(SCSIDevice *dev, uint32_t tag,
+ uint32_t lun, uint8_t *buf,
+ void *hba_private)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
+ SCSIRequest *req;
+ const SCSIReqOps *ops;
+ uint8_t command;
+
+ command = buf[0];
+ ops = ufs_scsi_reqops_dispatch[command];
+ if (!ops) {
+ ops = &ufs_scsi_emulate_reqops;
+ }
+ req = scsi_req_alloc(ops, &lu->qdev, tag, lun, hba_private);
+
+ return req;
+}
+
+static Property ufs_lu_props[] = {
+ DEFINE_PROP_DRIVE("drive", UfsLu, qdev.conf.blk),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static bool ufs_lu_brdv_init(UfsLu *lu, Error **errp)
+{
+ SCSIDevice *dev = &lu->qdev;
+ bool read_only;
+
+ if (!lu->qdev.conf.blk) {
+ error_setg(errp, "drive property not set");
+ return false;
+ }
+
+ if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
+ return false;
+ }
+
+ if (blk_get_aio_context(lu->qdev.conf.blk) != qemu_get_aio_context() &&
+ !lu->qdev.hba_supports_iothread) {
+ error_setg(errp, "HBA does not support iothreads");
+ return false;
+ }
+
+ read_only = !blk_supports_write_perm(lu->qdev.conf.blk);
+
+ if (!blkconf_apply_backend_options(&dev->conf, read_only,
+ dev->type == TYPE_DISK, errp)) {
+ return false;
+ }
+
+ if (blk_is_sg(lu->qdev.conf.blk)) {
+ error_setg(errp, "unwanted /dev/sg*");
+ return false;
+ }
+
+ blk_iostatus_enable(lu->qdev.conf.blk);
+ return true;
+}
+
+static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp)
+{
+ BlockBackend *blk = lu->qdev.conf.blk;
+ int64_t brdv_len = blk_getlength(blk);
+ uint64_t raw_dev_cap =
+ be64_to_cpu(u->geometry_desc.total_raw_device_capacity);
+
+ if (u->device_desc.number_lu >= UFS_MAX_LUS) {
+ error_setg(errp, "ufs host controller has too many logical units.");
+ return false;
+ }
+
+ if (u->lus[lu->lun] != NULL) {
+ error_setg(errp, "ufs logical unit %d already exists.", lu->lun);
+ return false;
+ }
+
+ u->lus[lu->lun] = lu;
+ u->device_desc.number_lu++;
+ raw_dev_cap += (brdv_len >> UFS_GEOMETRY_CAPACITY_SHIFT);
+ u->geometry_desc.total_raw_device_capacity = cpu_to_be64(raw_dev_cap);
+ return true;
+}
+
+static inline uint8_t ufs_log2(uint64_t input)
+{
+ int log = 0;
+ while (input >>= 1) {
+ log++;
+ }
+ return log;
+}
+
+static void ufs_init_lu(UfsLu *lu)
+{
+ BlockBackend *blk = lu->qdev.conf.blk;
+ int64_t brdv_len = blk_getlength(blk);
+
+ lu->lun = lu->qdev.lun;
+ memset(&lu->unit_desc, 0, sizeof(lu->unit_desc));
+ lu->unit_desc.length = sizeof(UnitDescriptor);
+ lu->unit_desc.descriptor_idn = UFS_QUERY_DESC_IDN_UNIT;
+ lu->unit_desc.lu_enable = 0x01;
+ lu->unit_desc.logical_block_size = ufs_log2(lu->qdev.blocksize);
+ lu->unit_desc.unit_index = lu->qdev.lun;
+ lu->unit_desc.logical_block_count =
+ cpu_to_be64(brdv_len / (1 << lu->unit_desc.logical_block_size));
+}
+
+static bool ufs_lu_check_constraints(UfsLu *lu, Error **errp)
+{
+ if (!lu->qdev.conf.blk) {
+ error_setg(errp, "drive property not set");
+ return false;
+ }
+
+ if (lu->qdev.channel != 0) {
+ error_setg(errp, "ufs logical unit does not support channel");
+ return false;
+ }
+
+ if (lu->qdev.lun >= UFS_MAX_LUS) {
+ error_setg(errp, "lun must be between 1 and %d", UFS_MAX_LUS - 1);
+ return false;
+ }
+
+ return true;
+}
+
+static void ufs_lu_realize(SCSIDevice *dev, Error **errp)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
+ BusState *s = qdev_get_parent_bus(&dev->qdev);
+ UfsHc *u = UFS(s->parent);
+ AioContext *ctx = NULL;
+ uint64_t nb_sectors, nb_blocks;
+
+ if (!ufs_lu_check_constraints(lu, errp)) {
+ return;
+ }
+
+ if (lu->qdev.conf.blk) {
+ ctx = blk_get_aio_context(lu->qdev.conf.blk);
+ aio_context_acquire(ctx);
+ if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
+ goto out;
+ }
+ }
+ lu->qdev.blocksize = UFS_BLOCK_SIZE;
+ blk_get_geometry(lu->qdev.conf.blk, &nb_sectors);
+ nb_blocks = nb_sectors / (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
+ if (nb_blocks > UINT32_MAX) {
+ nb_blocks = UINT32_MAX;
+ }
+ lu->qdev.max_lba = nb_blocks;
+ lu->qdev.type = TYPE_DISK;
+
+ ufs_init_lu(lu);
+ if (!ufs_add_lu(u, lu, errp)) {
+ goto out;
+ }
+
+ ufs_lu_brdv_init(lu, errp);
+out:
+ if (ctx) {
+ aio_context_release(ctx);
+ }
+}
+
+static void ufs_lu_unrealize(SCSIDevice *dev)
+{
+ UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
+
+ blk_drain(lu->qdev.conf.blk);
+}
+
+static void ufs_wlu_realize(DeviceState *qdev, Error **errp)
+{
+ UfsWLu *wlu = UFSWLU(qdev);
+ SCSIDevice *dev = &wlu->qdev;
+
+ if (!is_wlun(dev->lun)) {
+ error_setg(errp, "not well-known logical unit number");
+ return;
+ }
+
+ QTAILQ_INIT(&dev->requests);
+}
+
+static void ufs_lu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);
+
+ sc->realize = ufs_lu_realize;
+ sc->unrealize = ufs_lu_unrealize;
+ sc->alloc_req = scsi_new_request;
+ dc->bus_type = TYPE_UFS_BUS;
+ device_class_set_props(dc, ufs_lu_props);
+ dc->desc = "Virtual UFS logical unit";
+}
+
+static void ufs_wlu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);
+
+ /*
+ * The realize() function of TYPE_SCSI_DEVICE causes a segmentation fault
+ * if a block drive does not exist. Define a new realize function for
+ * well-known LUs that do not have a block drive.
+ */
+ dc->realize = ufs_wlu_realize;
+ sc->alloc_req = scsi_new_request;
+ dc->bus_type = TYPE_UFS_BUS;
+ dc->desc = "Virtual UFS well-known logical unit";
+}
+
+static const TypeInfo ufs_lu_info = {
+ .name = TYPE_UFS_LU,
+ .parent = TYPE_SCSI_DEVICE,
+ .class_init = ufs_lu_class_init,
+ .instance_size = sizeof(UfsLu),
+};
+
+static const TypeInfo ufs_wlu_info = {
+ .name = TYPE_UFS_WLU,
+ .parent = TYPE_SCSI_DEVICE,
+ .class_init = ufs_wlu_class_init,
+ .instance_size = sizeof(UfsWLu),
+};
+
+static void ufs_lu_register_types(void)
+{
+ type_register_static(&ufs_lu_info);
+ type_register_static(&ufs_wlu_info);
+}
+
+type_init(ufs_lu_register_types)
diff --git a/hw/ufs/meson.build b/hw/ufs/meson.build
new file mode 100644
index 0000000..6e68328
--- /dev/null
+++ b/hw/ufs/meson.build
@@ -0,0 +1 @@
+system_ss.add(when: 'CONFIG_UFS_PCI', if_true: files('ufs.c', 'lu.c'))
diff --git a/hw/ufs/trace-events b/hw/ufs/trace-events
new file mode 100644
index 0000000..1e55fb0
--- /dev/null
+++ b/hw/ufs/trace-events
@@ -0,0 +1,58 @@
+# ufs.c
+ufs_irq_raise(void) "INTx"
+ufs_irq_lower(void) "INTx"
+ufs_mmio_read(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d"
+ufs_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d"
+ufs_process_db(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_process_req(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_complete_req(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_sendback_req(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_exec_nop_cmd(uint32_t slot) "UTRLDBR slot %"PRIu32""
+ufs_exec_scsi_cmd(uint32_t slot, uint8_t lun, uint8_t opcode) "slot %"PRIu32", lun 0x%"PRIx8", opcode 0x%"PRIx8""
+ufs_exec_query_cmd(uint32_t slot, uint8_t opcode) "slot %"PRIu32", opcode 0x%"PRIx8""
+ufs_process_uiccmd(uint32_t uiccmd, uint32_t ucmdarg1, uint32_t ucmdarg2, uint32_t ucmdarg3) "uiccmd 0x%"PRIx32", ucmdarg1 0x%"PRIx32", ucmdarg2 0x%"PRIx32", ucmdarg3 0x%"PRIx32""
+
+# lu.c
+ufs_scsi_check_condition(uint32_t tag, uint8_t key, uint8_t asc, uint8_t ascq) "Command complete tag=0x%x sense=%d/%d/%d"
+ufs_scsi_read_complete(uint32_t tag, size_t size) "Data ready tag=0x%x len=%zd"
+ufs_scsi_read_data_count(uint32_t sector_count) "Read sector_count=%d"
+ufs_scsi_read_data_invalid(void) "Data transfer direction invalid"
+ufs_scsi_write_complete_noio(uint32_t tag, size_t size) "Write complete tag=0x%x more=%zd"
+ufs_scsi_write_data_invalid(void) "Data transfer direction invalid"
+ufs_scsi_emulate_vpd_page_00(size_t xfer) "Inquiry EVPD[Supported pages] buffer size %zd"
+ufs_scsi_emulate_vpd_page_80_not_supported(void) "Inquiry EVPD[Serial number] not supported"
+ufs_scsi_emulate_vpd_page_80(size_t xfer) "Inquiry EVPD[Serial number] buffer size %zd"
+ufs_scsi_emulate_vpd_page_87(size_t xfer) "Inquiry EVPD[Mode Page Policy] buffer size %zd"
+ufs_scsi_emulate_mode_sense(int cmd, int page, size_t xfer, int control) "Mode Sense(%d) (page %d, xfer %zd, page_control %d)"
+ufs_scsi_emulate_read_data(int buflen) "Read buf_len=%d"
+ufs_scsi_emulate_write_data(int buflen) "Write buf_len=%d"
+ufs_scsi_emulate_command_START_STOP(void) "START STOP UNIT"
+ufs_scsi_emulate_command_FORMAT_UNIT(void) "FORMAT UNIT"
+ufs_scsi_emulate_command_SEND_DIAGNOSTIC(void) "SEND DIAGNOSTIC"
+ufs_scsi_emulate_command_SAI_16(void) "SAI READ CAPACITY(16)"
+ufs_scsi_emulate_command_SAI_unsupported(void) "Unsupported Service Action In"
+ufs_scsi_emulate_command_MODE_SELECT_10(size_t xfer) "Mode Select(10) (len %zd)"
+ufs_scsi_emulate_command_VERIFY(int bytchk) "Verify (bytchk %d)"
+ufs_scsi_emulate_command_UNKNOWN(int cmd, const char *name) "Unknown SCSI command (0x%2.2x=%s)"
+ufs_scsi_dma_command_READ(uint64_t lba, uint32_t len) "Read (block %" PRIu64 ", count %u)"
+ufs_scsi_dma_command_WRITE(uint64_t lba, uint32_t len) "Write (block %" PRIu64 ", count %u)"
+
+# error condition
+ufs_err_dma_read_utrd(uint32_t slot, uint64_t addr) "failed to read utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64""
+ufs_err_dma_read_req_upiu(uint32_t slot, uint64_t addr) "failed to read req upiu. UTRLDBR slot %"PRIu32", request upiu addr %"PRIu64""
+ufs_err_dma_read_prdt(uint32_t slot, uint64_t addr) "failed to read prdt. UTRLDBR slot %"PRIu32", prdt addr %"PRIu64""
+ufs_err_dma_write_utrd(uint32_t slot, uint64_t addr) "failed to write utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64""
+ufs_err_dma_write_rsp_upiu(uint32_t slot, uint64_t addr) "failed to write rsp upiu. UTRLDBR slot %"PRIu32", response upiu addr %"PRIu64""
+ufs_err_utrl_slot_error(uint32_t slot) "UTRLDBR slot %"PRIu32" is in error"
+ufs_err_utrl_slot_busy(uint32_t slot) "UTRLDBR slot %"PRIu32" is busy"
+ufs_err_unsupport_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is not yet supported"
+ufs_err_invalid_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is invalid"
+ufs_err_scsi_cmd_invalid_lun(uint8_t lun) "scsi command has invalid lun: 0x%"PRIx8""
+ufs_err_query_flag_not_readable(uint8_t idn) "query flag idn 0x%"PRIx8" is not readable"
+ufs_err_query_flag_not_writable(uint8_t idn) "query flag idn 0x%"PRIx8" is not writable"
+ufs_err_query_attr_not_readable(uint8_t idn) "query attribute idn 0x%"PRIx8" is not readable"
+ufs_err_query_attr_not_writable(uint8_t idn) "query attribute idn 0x%"PRIx8" is not writable"
+ufs_err_query_invalid_opcode(uint8_t opcode) "query request has invalid opcode. opcode: 0x%"PRIx8""
+ufs_err_query_invalid_idn(uint8_t opcode, uint8_t idn) "query request has invalid idn. opcode: 0x%"PRIx8", idn 0x%"PRIx8""
+ufs_err_query_invalid_index(uint8_t opcode, uint8_t index) "query request has invalid index. opcode: 0x%"PRIx8", index 0x%"PRIx8""
+ufs_err_invalid_trans_code(uint32_t slot, uint8_t trans_code) "request upiu has invalid transaction code. slot: %"PRIu32", trans_code: 0x%"PRIx8""
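The trace points above can be enabled at runtime with QEMU's stock tracing switch, for example (a sketch; assumes a trace backend such as the default "log" backend is compiled in):

    qemu-system-x86_64 ... -trace "ufs_*"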
diff --git a/hw/ufs/trace.h b/hw/ufs/trace.h
new file mode 100644
index 0000000..2dbd639
--- /dev/null
+++ b/hw/ufs/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_ufs.h"
diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c
new file mode 100644
index 0000000..0ecedb9
--- /dev/null
+++ b/hw/ufs/ufs.c
@@ -0,0 +1,1502 @@
+/*
+ * QEMU Universal Flash Storage (UFS) Controller
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Written by Jeuk Kim <jeuk20.kim@samsung.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+/**
+ * Reference Specs: https://www.jedec.org/, 3.1
+ *
+ * Usage
+ * -----
+ *
+ * Add options:
+ * -drive file=<file>,if=none,id=<drive_id>
+ * -device ufs,serial=<serial>,id=<bus_name>, \
+ * nutrs=<N[optional]>,nutmrs=<N[optional]>
+ * -device ufs-lu,drive=<drive_id>,bus=<bus_name>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+#include "ufs.h"
+
+/* The QEMU-UFS device follows spec version 3.1 */
+#define UFS_SPEC_VER 0x0310
+#define UFS_MAX_NUTRS 32
+#define UFS_MAX_NUTMRS 8
+
+static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
+{
+ hwaddr hi = addr + size - 1;
+
+ if (hi < addr) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
+}
+
+static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
+ int size)
+{
+ hwaddr hi = addr + size - 1;
+ if (hi < addr) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
+ return MEMTX_DECODE_ERROR;
+ }
+
+ return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
+}
+
+static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result);
+
+static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
+{
+ hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
+ hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
+
+ return utrd_addr;
+}
+
+static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
+{
+ uint32_t cmd_desc_base_addr_lo =
+ le32_to_cpu(utrd->command_desc_base_addr_lo);
+ uint32_t cmd_desc_base_addr_hi =
+ le32_to_cpu(utrd->command_desc_base_addr_hi);
+
+ return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
+}
+
+static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
+{
+ hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
+ uint32_t rsp_upiu_byte_off =
+ le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
+ return req_upiu_base_addr + rsp_upiu_byte_off;
+}
+
+static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
+ MemTxResult ret;
+
+ ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
+ if (ret) {
+ trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
+ }
+ return ret;
+}
+
+static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
+ UtpUpiuReq *req_upiu = &req->req_upiu;
+ uint32_t copy_size;
+ uint16_t data_segment_length;
+ MemTxResult ret;
+
+ /*
+ * To know the size of the req_upiu, we need to read the
+ * data_segment_length in the header first.
+ */
+ ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
+ sizeof(UtpUpiuHeader));
+ if (ret) {
+ trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
+ return ret;
+ }
+ data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);
+
+ copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
+ data_segment_length;
+
+ ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
+ if (ret) {
+ trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
+ }
+ return ret;
+}
+
+static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
+ uint16_t prdt_byte_off =
+ le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
+ uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
+ g_autofree UfshcdSgEntry *prd_entries = NULL;
+ hwaddr req_upiu_base_addr, prdt_base_addr;
+ int err;
+
+ assert(!req->sg);
+
+ if (prdt_size == 0) {
+ return MEMTX_OK;
+ }
+ prd_entries = g_new(UfshcdSgEntry, prdt_len);
+
+ req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
+ prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
+
+ err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
+ if (err) {
+ trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
+ return err;
+ }
+
+ req->sg = g_malloc0(sizeof(QEMUSGList));
+ pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
+
+ for (uint16_t i = 0; i < prdt_len; ++i) {
+ hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
+ uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
+ qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
+ }
+ return MEMTX_OK;
+}
+
+static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
+{
+ MemTxResult ret;
+
+ ret = ufs_dma_read_utrd(req);
+ if (ret) {
+ return ret;
+ }
+
+ ret = ufs_dma_read_req_upiu(req);
+ if (ret) {
+ return ret;
+ }
+
+ ret = ufs_dma_read_prdt(req);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
+ MemTxResult ret;
+
+ ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
+ if (ret) {
+ trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
+ }
+ return ret;
+}
+
+static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
+ uint32_t rsp_upiu_byte_len =
+ le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
+ uint16_t data_segment_length =
+ be16_to_cpu(req->rsp_upiu.header.data_segment_length);
+ uint32_t copy_size = sizeof(UtpUpiuHeader) +
+ UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
+ data_segment_length;
+ MemTxResult ret;
+
+ if (copy_size > rsp_upiu_byte_len) {
+ copy_size = rsp_upiu_byte_len;
+ }
+
+ ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
+ if (ret) {
+ trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
+ }
+ return ret;
+}
+
+static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
+{
+ MemTxResult ret;
+
+ ret = ufs_dma_write_rsp_upiu(req);
+ if (ret) {
+ return ret;
+ }
+
+ return ufs_dma_write_utrd(req);
+}
+
+static void ufs_irq_check(UfsHc *u)
+{
+ PCIDevice *pci = PCI_DEVICE(u);
+
+ if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
+ trace_ufs_irq_raise();
+ pci_irq_assert(pci);
+ } else {
+ trace_ufs_irq_lower();
+ pci_irq_deassert(pci);
+ }
+}
+
+static void ufs_process_db(UfsHc *u, uint32_t val)
+{
+ unsigned long doorbell;
+ uint32_t slot;
+ uint32_t nutrs = u->params.nutrs;
+ UfsRequest *req;
+
+ val &= ~u->reg.utrldbr;
+ if (!val) {
+ return;
+ }
+
+ doorbell = val;
+ slot = find_first_bit(&doorbell, nutrs);
+
+ while (slot < nutrs) {
+ req = &u->req_list[slot];
+ if (req->state == UFS_REQUEST_ERROR) {
+ trace_ufs_err_utrl_slot_error(req->slot);
+ return;
+ }
+
+ if (req->state != UFS_REQUEST_IDLE) {
+ trace_ufs_err_utrl_slot_busy(req->slot);
+ return;
+ }
+
+ trace_ufs_process_db(slot);
+ req->state = UFS_REQUEST_READY;
+ slot = find_next_bit(&doorbell, nutrs, slot + 1);
+ }
+
+ qemu_bh_schedule(u->doorbell_bh);
+}
+
+static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
+{
+ trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
+ u->reg.ucmdarg3);
+ /*
+ * Only the essential uic commands for running drivers on Linux and Windows
+ * are implemented.
+ */
+ switch (val) {
+ case UFS_UIC_CMD_DME_LINK_STARTUP:
+ u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
+ u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
+ u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
+ u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
+ break;
+ /* TODO: Revisit it when Power Management is implemented */
+ case UFS_UIC_CMD_DME_HIBER_ENTER:
+ u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
+ u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
+ u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
+ break;
+ case UFS_UIC_CMD_DME_HIBER_EXIT:
+ u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
+ u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
+ u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
+ break;
+ default:
+ u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
+ }
+
+ u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);
+
+ ufs_irq_check(u);
+}
+
+static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
+{
+ switch (offset) {
+ case A_IS:
+ u->reg.is &= ~data;
+ ufs_irq_check(u);
+ break;
+ case A_IE:
+ u->reg.ie = data;
+ ufs_irq_check(u);
+ break;
+ case A_HCE:
+ if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
+ u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
+ u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
+ } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
+ !FIELD_EX32(data, HCE, HCE)) {
+ u->reg.hcs = 0;
+ u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
+ }
+ break;
+ case A_UTRLBA:
+ u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
+ break;
+ case A_UTRLBAU:
+ u->reg.utrlbau = data;
+ break;
+ case A_UTRLDBR:
+ ufs_process_db(u, data);
+ u->reg.utrldbr |= data;
+ break;
+ case A_UTRLRSR:
+ u->reg.utrlrsr = data;
+ break;
+ case A_UTRLCNR:
+ u->reg.utrlcnr &= ~data;
+ break;
+ case A_UTMRLBA:
+ u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
+ break;
+ case A_UTMRLBAU:
+ u->reg.utmrlbau = data;
+ break;
+ case A_UICCMD:
+ ufs_process_uiccmd(u, data);
+ break;
+ case A_UCMDARG1:
+ u->reg.ucmdarg1 = data;
+ break;
+ case A_UCMDARG2:
+ u->reg.ucmdarg2 = data;
+ break;
+ case A_UCMDARG3:
+ u->reg.ucmdarg3 = data;
+ break;
+ case A_UTRLCLR:
+ case A_UTMRLDBR:
+ case A_UTMRLCLR:
+ case A_UTMRLRSR:
+ trace_ufs_err_unsupport_register_offset(offset);
+ break;
+ default:
+ trace_ufs_err_invalid_register_offset(offset);
+ break;
+ }
+}
+
+static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+ UfsHc *u = (UfsHc *)opaque;
+ uint8_t *ptr = (uint8_t *)&u->reg;
+ uint64_t value;
+
+ if (addr > sizeof(u->reg) - size) {
+ trace_ufs_err_invalid_register_offset(addr);
+ return 0;
+ }
+
+ value = *(uint32_t *)(ptr + addr);
+ trace_ufs_mmio_read(addr, value, size);
+ return value;
+}
+
+static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned size)
+{
+ UfsHc *u = (UfsHc *)opaque;
+
+ if (addr > sizeof(u->reg) - size) {
+ trace_ufs_err_invalid_register_offset(addr);
+ return;
+ }
+
+ trace_ufs_mmio_write(addr, data, size);
+ ufs_write_reg(u, addr, data, size);
+}
+
+static const MemoryRegionOps ufs_mmio_ops = {
+ .read = ufs_mmio_read,
+ .write = ufs_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static QEMUSGList *ufs_get_sg_list(SCSIRequest *scsi_req)
+{
+ UfsRequest *req = scsi_req->hba_private;
+ return req->sg;
+}
+
+static void ufs_build_upiu_sense_data(UfsRequest *req, SCSIRequest *scsi_req)
+{
+ req->rsp_upiu.sr.sense_data_len = cpu_to_be16(scsi_req->sense_len);
+ assert(scsi_req->sense_len <= SCSI_SENSE_LEN);
+ memcpy(req->rsp_upiu.sr.sense_data, scsi_req->sense, scsi_req->sense_len);
+}
+
+static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type,
+ uint8_t flags, uint8_t response,
+ uint8_t scsi_status,
+ uint16_t data_segment_length)
+{
+ memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
+ req->rsp_upiu.header.trans_type = trans_type;
+ req->rsp_upiu.header.flags = flags;
+ req->rsp_upiu.header.response = response;
+ req->rsp_upiu.header.scsi_status = scsi_status;
+ req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
+}
+
+static void ufs_scsi_command_complete(SCSIRequest *scsi_req, size_t resid)
+{
+ UfsRequest *req = scsi_req->hba_private;
+ int16_t status = scsi_req->status;
+ uint32_t expected_len = be32_to_cpu(req->req_upiu.sc.exp_data_transfer_len);
+ uint32_t transferred_len = scsi_req->cmd.xfer - resid;
+ uint8_t flags = 0, response = UFS_COMMAND_RESULT_SUCESS;
+ uint16_t data_segment_length;
+
+ if (expected_len > transferred_len) {
+ req->rsp_upiu.sr.residual_transfer_count =
+ cpu_to_be32(expected_len - transferred_len);
+ flags |= UFS_UPIU_FLAG_UNDERFLOW;
+ } else if (expected_len < transferred_len) {
+ req->rsp_upiu.sr.residual_transfer_count =
+ cpu_to_be32(transferred_len - expected_len);
+ flags |= UFS_UPIU_FLAG_OVERFLOW;
+ }
+
+ if (status != 0) {
+ ufs_build_upiu_sense_data(req, scsi_req);
+ response = UFS_COMMAND_RESULT_FAIL;
+ }
+
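+ /*
+ * The response data segment carries the 16-bit sense data length field
+ * followed by the sense data itself. ufs_build_upiu_header() converts the
+ * length to big endian when it fills in the header, so it is passed in
+ * host byte order here.
+ */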
+ data_segment_length = scsi_req->sense_len +
+ sizeof(req->rsp_upiu.sr.sense_data_len);
+ ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_RESPONSE, flags, response,
+ status, data_segment_length);
+
+ ufs_complete_req(req, UFS_REQUEST_SUCCESS);
+
+ scsi_req->hba_private = NULL;
+ scsi_req_unref(scsi_req);
+}
+
+static const struct SCSIBusInfo ufs_scsi_info = {
+ .tcq = true,
+ .max_target = 0,
+ .max_lun = UFS_MAX_LUS,
+ .max_channel = 0,
+
+ .get_sg_list = ufs_get_sg_list,
+ .complete = ufs_scsi_command_complete,
+};
+
+static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ uint8_t lun = req->req_upiu.header.lun;
+ uint8_t task_tag = req->req_upiu.header.task_tag;
+ SCSIDevice *dev = NULL;
+
+ trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);
+
+ if (!is_wlun(lun)) {
+ if (lun >= u->device_desc.number_lu) {
+ trace_ufs_err_scsi_cmd_invalid_lun(lun);
+ return UFS_REQUEST_FAIL;
+ } else if (u->lus[lun] == NULL) {
+ trace_ufs_err_scsi_cmd_invalid_lun(lun);
+ return UFS_REQUEST_FAIL;
+ }
+ }
+
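+ /*
+ * The four well-known lus are backed by dedicated SCSIDevice instances
+ * created at realize time; every other lun dispatches to the logical unit
+ * registered in u->lus[].
+ */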
+ switch (lun) {
+ case UFS_UPIU_REPORT_LUNS_WLUN:
+ dev = &u->report_wlu->qdev;
+ break;
+ case UFS_UPIU_UFS_DEVICE_WLUN:
+ dev = &u->dev_wlu->qdev;
+ break;
+ case UFS_UPIU_BOOT_WLUN:
+ dev = &u->boot_wlu->qdev;
+ break;
+ case UFS_UPIU_RPMB_WLUN:
+ dev = &u->rpmb_wlu->qdev;
+ break;
+ default:
+ dev = &u->lus[lun]->qdev;
+ }
+
+ SCSIRequest *scsi_req = scsi_req_new(
+ dev, task_tag, lun, req->req_upiu.sc.cdb, UFS_CDB_SIZE, req);
+
+ uint32_t len = scsi_req_enqueue(scsi_req);
+ if (len) {
+ scsi_req_continue(scsi_req);
+ }
+
+ return UFS_REQUEST_NO_COMPLETE;
+}
+
+static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
+{
+ trace_ufs_exec_nop_cmd(req->slot);
+ ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
+ return UFS_REQUEST_SUCCESS;
+}
+
+/*
+ * This table defines the permitted operations for each flag, indexed by its
+ * IDN. Some flags are deliberately declared read-only, deviating from the
+ * UFS spec, so that writes to features that are not yet supported fail with
+ * an error.
+ */
+static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
+ [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
+ /* Write protection is not supported */
+ [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
+ [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
+ [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
+ UFS_QUERY_FLAG_CLEAR |
+ UFS_QUERY_FLAG_TOGGLE,
+ [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
+ UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
+ UFS_QUERY_FLAG_TOGGLE,
+ /* Purge Operation is not supported */
+ [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
+ /* Refresh Operation is not supported */
+ [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
+ /* Physical Resource Removal is not supported */
+ [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
+ [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
+ [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
+ /* Write Booster is not supported */
+ [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
+ [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
+ [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
+};
+
+static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
+{
+ if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
+ return UFS_QUERY_RESULT_INVALID_IDN;
+ }
+
+ if (!(flag_permission[idn] & op)) {
+ if (op == UFS_QUERY_FLAG_READ) {
+ trace_ufs_err_query_flag_not_readable(idn);
+ return UFS_QUERY_RESULT_NOT_READABLE;
+ }
+ trace_ufs_err_query_flag_not_writable(idn);
+ return UFS_QUERY_RESULT_NOT_WRITEABLE;
+ }
+
+ return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
+ /* booting is not supported */
+ [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
+ UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
+ [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
+ /* refresh operation is not supported */
+ [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
+ [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
+};
+
+static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
+{
+ if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
+ return UFS_QUERY_RESULT_INVALID_IDN;
+ }
+
+ if (!(attr_permission[idn] & op)) {
+ if (op == UFS_QUERY_ATTR_READ) {
+ trace_ufs_err_query_attr_not_readable(idn);
+ return UFS_QUERY_RESULT_NOT_READABLE;
+ }
+ trace_ufs_err_query_attr_not_writable(idn);
+ return UFS_QUERY_RESULT_NOT_WRITEABLE;
+ }
+
+ return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
+{
+ UfsHc *u = req->hc;
+ uint8_t idn = req->req_upiu.qr.idn;
+ uint32_t value;
+ QueryRespCode ret;
+
+ ret = ufs_flag_check_idn_valid(idn, op);
+ if (ret) {
+ return ret;
+ }
+
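+ /*
+ * The Flags structure stores one flag per byte at the offset given by the
+ * flag's IDN, so it can be indexed as a plain byte array below.
+ */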
+ if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
+ value = 0;
+ } else if (op == UFS_QUERY_FLAG_READ) {
+ value = *(((uint8_t *)&u->flags) + idn);
+ } else if (op == UFS_QUERY_FLAG_SET) {
+ value = 1;
+ } else if (op == UFS_QUERY_FLAG_CLEAR) {
+ value = 0;
+ } else if (op == UFS_QUERY_FLAG_TOGGLE) {
+ value = *(((uint8_t *)&u->flags) + idn);
+ value = !value;
+ } else {
+ trace_ufs_err_query_invalid_opcode(op);
+ return UFS_QUERY_RESULT_INVALID_OPCODE;
+ }
+
+ *(((uint8_t *)&u->flags) + idn) = value;
+ req->rsp_upiu.qr.value = cpu_to_be32(value);
+ return UFS_QUERY_RESULT_SUCCESS;
+}
+
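+/*
+ * Attributes wider than one byte are stored big endian in u->attributes:
+ * ufs_read_attr_value() converts them to host order and
+ * ufs_write_attr_value() converts them back, while the qr.value field of
+ * the UPIU is always big endian.
+ */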
+static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
+{
+ switch (idn) {
+ case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
+ return u->attributes.boot_lun_en;
+ case UFS_QUERY_ATTR_IDN_POWER_MODE:
+ return u->attributes.current_power_mode;
+ case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+ return u->attributes.active_icc_level;
+ case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
+ return u->attributes.out_of_order_data_en;
+ case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
+ return u->attributes.background_op_status;
+ case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
+ return u->attributes.purge_status;
+ case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
+ return u->attributes.max_data_in_size;
+ case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
+ return u->attributes.max_data_out_size;
+ case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+ return be32_to_cpu(u->attributes.dyn_cap_needed);
+ case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
+ return u->attributes.ref_clk_freq;
+ case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
+ return u->attributes.config_descr_lock;
+ case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+ return u->attributes.max_num_of_rtt;
+ case UFS_QUERY_ATTR_IDN_EE_CONTROL:
+ return be16_to_cpu(u->attributes.exception_event_control);
+ case UFS_QUERY_ATTR_IDN_EE_STATUS:
+ return be16_to_cpu(u->attributes.exception_event_status);
+ case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
+ return be32_to_cpu(u->attributes.seconds_passed);
+ case UFS_QUERY_ATTR_IDN_CNTX_CONF:
+ return be16_to_cpu(u->attributes.context_conf);
+ case UFS_QUERY_ATTR_IDN_FFU_STATUS:
+ return u->attributes.device_ffu_status;
+ case UFS_QUERY_ATTR_IDN_PSA_STATE:
+ return be32_to_cpu(u->attributes.psa_state);
+ case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
+ return be32_to_cpu(u->attributes.psa_data_size);
+ case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
+ return u->attributes.ref_clk_gating_wait_time;
+ case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
+ return u->attributes.device_case_rough_temperaure;
+ case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
+ return u->attributes.device_too_high_temp_boundary;
+ case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
+ return u->attributes.device_too_low_temp_boundary;
+ case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
+ return u->attributes.throttling_status;
+ case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
+ return u->attributes.wb_buffer_flush_status;
+ case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
+ return u->attributes.available_wb_buffer_size;
+ case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
+ return u->attributes.wb_buffer_life_time_est;
+ case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
+ return be32_to_cpu(u->attributes.current_wb_buffer_size);
+ case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
+ return u->attributes.refresh_status;
+ case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
+ return u->attributes.refresh_freq;
+ case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
+ return u->attributes.refresh_unit;
+ }
+ return 0;
+}
+
+static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
+{
+ switch (idn) {
+ case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+ u->attributes.active_icc_level = value;
+ break;
+ case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
+ u->attributes.max_data_in_size = value;
+ break;
+ case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
+ u->attributes.max_data_out_size = value;
+ break;
+ case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
+ u->attributes.ref_clk_freq = value;
+ break;
+ case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+ u->attributes.max_num_of_rtt = value;
+ break;
+ case UFS_QUERY_ATTR_IDN_EE_CONTROL:
+ u->attributes.exception_event_control = cpu_to_be16(value);
+ break;
+ case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
+ u->attributes.seconds_passed = cpu_to_be32(value);
+ break;
+ case UFS_QUERY_ATTR_IDN_PSA_STATE:
+ u->attributes.psa_state = cpu_to_be32(value);
+ break;
+ case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
+ u->attributes.psa_data_size = cpu_to_be32(value);
+ break;
+ }
+}
+
+static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
+{
+ UfsHc *u = req->hc;
+ uint8_t idn = req->req_upiu.qr.idn;
+ uint32_t value;
+ QueryRespCode ret;
+
+ ret = ufs_attr_check_idn_valid(idn, op);
+ if (ret) {
+ return ret;
+ }
+
+ if (op == UFS_QUERY_ATTR_READ) {
+ value = ufs_read_attr_value(u, idn);
+ } else {
+ value = be32_to_cpu(req->req_upiu.qr.value);
+ ufs_write_attr_value(u, idn, value);
+ }
+
+ req->rsp_upiu.qr.value = cpu_to_be32(value);
+ return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static const RpmbUnitDescriptor rpmb_unit_desc = {
+ .length = sizeof(RpmbUnitDescriptor),
+ .descriptor_idn = 2,
+ .unit_index = UFS_UPIU_RPMB_WLUN,
+ .lu_enable = 0,
+};
+
+static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ uint8_t lun = req->req_upiu.qr.index;
+
+ if (lun != UFS_UPIU_RPMB_WLUN &&
+ (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
+ trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
+ return UFS_QUERY_RESULT_INVALID_INDEX;
+ }
+
+ if (lun == UFS_UPIU_RPMB_WLUN) {
+ memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
+ } else {
+ memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
+ sizeof(u->lus[lun]->unit_desc));
+ }
+
+ return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static inline StringDescriptor manufacturer_str_desc(void)
+{
+ StringDescriptor desc = {
+ .length = 0x12,
+ .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+ };
+ desc.UC[0] = cpu_to_be16('R');
+ desc.UC[1] = cpu_to_be16('E');
+ desc.UC[2] = cpu_to_be16('D');
+ desc.UC[3] = cpu_to_be16('H');
+ desc.UC[4] = cpu_to_be16('A');
+ desc.UC[5] = cpu_to_be16('T');
+ return desc;
+}
+
+static inline StringDescriptor product_name_str_desc(void)
+{
+ StringDescriptor desc = {
+ .length = 0x22,
+ .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+ };
+ desc.UC[0] = cpu_to_be16('Q');
+ desc.UC[1] = cpu_to_be16('E');
+ desc.UC[2] = cpu_to_be16('M');
+ desc.UC[3] = cpu_to_be16('U');
+ desc.UC[4] = cpu_to_be16(' ');
+ desc.UC[5] = cpu_to_be16('U');
+ desc.UC[6] = cpu_to_be16('F');
+ desc.UC[7] = cpu_to_be16('S');
+ return desc;
+}
+
+static inline StringDescriptor product_rev_level_str_desc(void)
+{
+ StringDescriptor desc = {
+ .length = 0x0a,
+ .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+ };
+ desc.UC[0] = cpu_to_be16('0');
+ desc.UC[1] = cpu_to_be16('0');
+ desc.UC[2] = cpu_to_be16('0');
+ desc.UC[3] = cpu_to_be16('1');
+ return desc;
+}
+
+static const StringDescriptor null_str_desc = {
+ .length = 0x02,
+ .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
+};
+
+static QueryRespCode ufs_read_string_desc(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ uint8_t index = req->req_upiu.qr.index;
+ StringDescriptor desc;
+
+ if (index == u->device_desc.manufacturer_name) {
+ desc = manufacturer_str_desc();
+ memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
+ } else if (index == u->device_desc.product_name) {
+ desc = product_name_str_desc();
+ memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
+ } else if (index == u->device_desc.serial_number) {
+ memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
+ } else if (index == u->device_desc.oem_id) {
+ memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
+ } else if (index == u->device_desc.product_revision_level) {
+ desc = product_rev_level_str_desc();
+ memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
+ } else {
+ trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
+ return UFS_QUERY_RESULT_INVALID_INDEX;
+ }
+ return UFS_QUERY_RESULT_SUCCESS;
+}
+
+static inline InterconnectDescriptor interconnect_desc(void)
+{
+ InterconnectDescriptor desc = {
+ .length = sizeof(InterconnectDescriptor),
+ .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
+ };
+ desc.bcd_unipro_version = cpu_to_be16(0x180);
+ desc.bcd_mphy_version = cpu_to_be16(0x410);
+ return desc;
+}
+
+static QueryRespCode ufs_read_desc(UfsRequest *req)
+{
+ UfsHc *u = req->hc;
+ QueryRespCode status;
+ uint8_t idn = req->req_upiu.qr.idn;
+ uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
+ InterconnectDescriptor desc;
+
+ switch (idn) {
+ case UFS_QUERY_DESC_IDN_DEVICE:
+ memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ case UFS_QUERY_DESC_IDN_UNIT:
+ status = ufs_read_unit_desc(req);
+ break;
+ case UFS_QUERY_DESC_IDN_GEOMETRY:
+ memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
+ sizeof(u->geometry_desc));
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ case UFS_QUERY_DESC_IDN_INTERCONNECT: {
+ desc = interconnect_desc();
+ memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ }
+ case UFS_QUERY_DESC_IDN_STRING:
+ status = ufs_read_string_desc(req);
+ break;
+ case UFS_QUERY_DESC_IDN_POWER:
+ /* the power descriptor is not emulated; return a zero-filled stub */
+ memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
+ req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
+ req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ case UFS_QUERY_DESC_IDN_HEALTH:
+ /* the health descriptor is not emulated; return a zero-filled stub */
+ memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
+ req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
+ req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ default:
+ length = 0;
+ trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
+ status = UFS_QUERY_RESULT_INVALID_IDN;
+ }
+
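+ /*
+ * Byte 0 of every descriptor holds its actual length; clamp the response
+ * length to that so the guest never reads past the end of the descriptor,
+ * even if it requested more.
+ */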
+ if (length > req->rsp_upiu.qr.data[0]) {
+ length = req->rsp_upiu.qr.data[0];
+ }
+ req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
+ req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
+ req->rsp_upiu.qr.index = req->req_upiu.qr.index;
+ req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
+ req->rsp_upiu.qr.length = cpu_to_be16(length);
+
+ return status;
+}
+
+static QueryRespCode ufs_exec_query_read(UfsRequest *req)
+{
+ QueryRespCode status;
+ switch (req->req_upiu.qr.opcode) {
+ case UFS_UPIU_QUERY_OPCODE_NOP:
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ case UFS_UPIU_QUERY_OPCODE_READ_DESC:
+ status = ufs_read_desc(req);
+ break;
+ case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
+ status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
+ break;
+ case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
+ status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
+ break;
+ default:
+ trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
+ status = UFS_QUERY_RESULT_INVALID_OPCODE;
+ break;
+ }
+
+ return status;
+}
+
+static QueryRespCode ufs_exec_query_write(UfsRequest *req)
+{
+ QueryRespCode status;
+ switch (req->req_upiu.qr.opcode) {
+ case UFS_UPIU_QUERY_OPCODE_NOP:
+ status = UFS_QUERY_RESULT_SUCCESS;
+ break;
+ case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
+ /* write descriptor is not supported */
+ status = UFS_QUERY_RESULT_NOT_WRITEABLE;
+ break;
+ case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
+ status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
+ break;
+ case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
+ status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
+ break;
+ case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
+ break;
+ case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
+ break;
+ default:
+ trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
+ status = UFS_QUERY_RESULT_INVALID_OPCODE;
+ break;
+ }
+
+ return status;
+}
+
+static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
+{
+ uint8_t query_func = req->req_upiu.header.query_func;
+ uint16_t data_segment_length;
+ QueryRespCode status;
+
+ trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
+ if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
+ status = ufs_exec_query_read(req);
+ } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
+ status = ufs_exec_query_write(req);
+ } else {
+ status = UFS_QUERY_RESULT_GENERAL_FAILURE;
+ }
+
+ data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
+ ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
+ data_segment_length);
+
+ if (status != UFS_QUERY_RESULT_SUCCESS) {
+ return UFS_REQUEST_FAIL;
+ }
+ return UFS_REQUEST_SUCCESS;
+}
+
+static void ufs_exec_req(UfsRequest *req)
+{
+ UfsReqResult req_result;
+
+ if (ufs_dma_read_upiu(req)) {
+ return;
+ }
+
+ switch (req->req_upiu.header.trans_type) {
+ case UFS_UPIU_TRANSACTION_NOP_OUT:
+ req_result = ufs_exec_nop_cmd(req);
+ break;
+ case UFS_UPIU_TRANSACTION_COMMAND:
+ req_result = ufs_exec_scsi_cmd(req);
+ break;
+ case UFS_UPIU_TRANSACTION_QUERY_REQ:
+ req_result = ufs_exec_query_cmd(req);
+ break;
+ default:
+ trace_ufs_err_invalid_trans_code(req->slot,
+ req->req_upiu.header.trans_type);
+ req_result = UFS_REQUEST_FAIL;
+ }
+
+ /*
+ * For scsi commands, ufs_complete_req() is invoked from the
+ * ufs_scsi_command_complete() callback once the SCSI layer finishes the
+ * request, so it is not called again here to avoid completing the same
+ * request twice.
+ */
+ if (req_result != UFS_REQUEST_NO_COMPLETE) {
+ ufs_complete_req(req, req_result);
+ }
+}
+
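+/*
+ * Request lifecycle: a doorbell write marks a slot READY; the doorbell BH
+ * (ufs_process_req) moves it to RUNNING and executes it; ufs_complete_req()
+ * marks it COMPLETE and schedules the completion BH (ufs_sendback_req),
+ * which writes the response UPIU back to guest memory and returns the slot
+ * to IDLE.
+ */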
+static void ufs_process_req(void *opaque)
+{
+ UfsHc *u = opaque;
+ UfsRequest *req;
+ int slot;
+
+ for (slot = 0; slot < u->params.nutrs; slot++) {
+ req = &u->req_list[slot];
+
+ if (req->state != UFS_REQUEST_READY) {
+ continue;
+ }
+ trace_ufs_process_req(slot);
+ req->state = UFS_REQUEST_RUNNING;
+
+ ufs_exec_req(req);
+ }
+}
+
+static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
+{
+ UfsHc *u = req->hc;
+ assert(req->state == UFS_REQUEST_RUNNING);
+
+ if (req_result == UFS_REQUEST_SUCCESS) {
+ req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
+ } else {
+ req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
+ }
+
+ trace_ufs_complete_req(req->slot);
+ req->state = UFS_REQUEST_COMPLETE;
+ qemu_bh_schedule(u->complete_bh);
+}
+
+static void ufs_clear_req(UfsRequest *req)
+{
+ if (req->sg != NULL) {
+ qemu_sglist_destroy(req->sg);
+ g_free(req->sg);
+ req->sg = NULL;
+ }
+
+ memset(&req->utrd, 0, sizeof(req->utrd));
+ memset(&req->req_upiu, 0, sizeof(req->req_upiu));
+ memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
+}
+
+static void ufs_sendback_req(void *opaque)
+{
+ UfsHc *u = opaque;
+ UfsRequest *req;
+ int slot;
+
+ for (slot = 0; slot < u->params.nutrs; slot++) {
+ req = &u->req_list[slot];
+
+ if (req->state != UFS_REQUEST_COMPLETE) {
+ continue;
+ }
+
+ if (ufs_dma_write_upiu(req)) {
+ req->state = UFS_REQUEST_ERROR;
+ continue;
+ }
+
+ /*
+ * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
+ * supported
+ */
+ if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
+ le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
+ u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
+ }
+
+ u->reg.utrldbr &= ~(1 << slot);
+ u->reg.utrlcnr |= (1 << slot);
+
+ trace_ufs_sendback_req(req->slot);
+
+ ufs_clear_req(req);
+ req->state = UFS_REQUEST_IDLE;
+ }
+
+ ufs_irq_check(u);
+}
+
+static bool ufs_check_constraints(UfsHc *u, Error **errp)
+{
+ if (u->params.nutrs > UFS_MAX_NUTRS) {
+ error_setg(errp, "nutrs must be less than or equal to %d",
+ UFS_MAX_NUTRS);
+ return false;
+ }
+
+ if (u->params.nutmrs > UFS_MAX_NUTMRS) {
+ error_setg(errp, "nutmrs must be less than or equal to %d",
+ UFS_MAX_NUTMRS);
+ return false;
+ }
+
+ return true;
+}
+
+static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
+{
+ uint8_t *pci_conf = pci_dev->config;
+
+ pci_conf[PCI_INTERRUPT_PIN] = 1;
+ pci_config_set_prog_interface(pci_conf, 0x1);
+
+ memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
+ u->reg_size);
+ pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
+ u->irq = pci_allocate_irq(pci_dev);
+}
+
+static void ufs_init_state(UfsHc *u)
+{
+ u->req_list = g_new0(UfsRequest, u->params.nutrs);
+
+ for (int i = 0; i < u->params.nutrs; i++) {
+ u->req_list[i].hc = u;
+ u->req_list[i].slot = i;
+ u->req_list[i].sg = NULL;
+ u->req_list[i].state = UFS_REQUEST_IDLE;
+ }
+
+ u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
+ &DEVICE(u)->mem_reentrancy_guard);
+ u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
+ &DEVICE(u)->mem_reentrancy_guard);
+}
+
+static void ufs_init_hc(UfsHc *u)
+{
+ uint32_t cap = 0;
+
+ u->reg_size = pow2ceil(sizeof(UfsReg));
+
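+ /* The CAP queue depth fields are 0-based, hence the "- 1" below. */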
+ memset(&u->reg, 0, sizeof(u->reg));
+ cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
+ cap = FIELD_DP32(cap, CAP, RTT, 2);
+ cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
+ cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
+ cap = FIELD_DP32(cap, CAP, 64AS, 1);
+ cap = FIELD_DP32(cap, CAP, OODDS, 0);
+ cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
+ cap = FIELD_DP32(cap, CAP, CS, 0);
+ u->reg.cap = cap;
+ u->reg.ver = UFS_SPEC_VER;
+
+ memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
+ u->device_desc.length = sizeof(DeviceDescriptor);
+ u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
+ u->device_desc.device_sub_class = 0x01;
+ u->device_desc.number_lu = 0x00;
+ u->device_desc.number_wlu = 0x04;
+ /* TODO: Revisit this once Power Management is implemented */
+ u->device_desc.init_power_mode = 0x01; /* Active Mode */
+ u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
+ u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
+ u->device_desc.manufacturer_name = 0x00;
+ u->device_desc.product_name = 0x01;
+ u->device_desc.serial_number = 0x02;
+ u->device_desc.oem_id = 0x03;
+ u->device_desc.ud_0_base_offset = 0x16;
+ u->device_desc.ud_config_p_length = 0x1A;
+ u->device_desc.device_rtt_cap = 0x02;
+ u->device_desc.queue_depth = u->params.nutrs;
+ u->device_desc.product_revision_level = 0x04;
+
+ memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
+ u->geometry_desc.length = sizeof(GeometryDescriptor);
+ u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
+ u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
+ u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
+ u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
+ u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
+ u->geometry_desc.max_in_buffer_size = 0x8;
+ u->geometry_desc.max_out_buffer_size = 0x8;
+ u->geometry_desc.rpmb_read_write_size = 0x40;
+ /* out-of-order data transfer is not supported */
+ u->geometry_desc.data_ordering = 0x0;
+ u->geometry_desc.max_context_id_number = 0x5;
+ u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);
+
+ memset(&u->attributes, 0, sizeof(u->attributes));
+ u->attributes.max_data_in_size = 0x08;
+ u->attributes.max_data_out_size = 0x08;
+ u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
+ /* the Configuration Descriptor is not supported, so report it as locked */
+ u->attributes.config_descr_lock = 0x01;
+ u->attributes.max_num_of_rtt = 0x02;
+
+ memset(&u->flags, 0, sizeof(u->flags));
+ u->flags.permanently_disable_fw_update = 1;
+}
+
+static bool ufs_init_wlu(UfsHc *u, UfsWLu **wlu, uint8_t wlun, Error **errp)
+{
+ UfsWLu *new_wlu = UFSWLU(qdev_new(TYPE_UFS_WLU));
+
+ qdev_prop_set_uint32(DEVICE(new_wlu), "lun", wlun);
+
+ /*
+ * The well-known lus share the same bus as the normal lus. If a well-known
+ * lu used the same channel value as the normal lus, REPORT LUNS would
+ * report the well-known lus alongside the normal ones. To prevent this,
+ * normal lus are fixed to channel 0 and well-known lus to channel 1.
+ */
+ qdev_prop_set_uint32(DEVICE(new_wlu), "channel", 1);
+ if (!qdev_realize_and_unref(DEVICE(new_wlu), BUS(&u->bus), errp)) {
+ return false;
+ }
+
+ *wlu = new_wlu;
+ return true;
+}
+
+static void ufs_realize(PCIDevice *pci_dev, Error **errp)
+{
+ UfsHc *u = UFS(pci_dev);
+
+ if (!ufs_check_constraints(u, errp)) {
+ return;
+ }
+
+ qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
+ u->parent_obj.qdev.id);
+ u->bus.parent_bus.info = &ufs_scsi_info;
+
+ ufs_init_state(u);
+ ufs_init_hc(u);
+ ufs_init_pci(u, pci_dev);
+
+ if (!ufs_init_wlu(u, &u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN, errp)) {
+ return;
+ }
+
+ if (!ufs_init_wlu(u, &u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN, errp)) {
+ return;
+ }
+
+ if (!ufs_init_wlu(u, &u->boot_wlu, UFS_UPIU_BOOT_WLUN, errp)) {
+ return;
+ }
+
+ if (!ufs_init_wlu(u, &u->rpmb_wlu, UFS_UPIU_RPMB_WLUN, errp)) {
+ return;
+ }
+}
+
+static void ufs_exit(PCIDevice *pci_dev)
+{
+ UfsHc *u = UFS(pci_dev);
+
+ if (u->dev_wlu) {
+ object_unref(OBJECT(u->dev_wlu));
+ u->dev_wlu = NULL;
+ }
+
+ if (u->report_wlu) {
+ object_unref(OBJECT(u->report_wlu));
+ u->report_wlu = NULL;
+ }
+
+ if (u->rpmb_wlu) {
+ object_unref(OBJECT(u->rpmb_wlu));
+ u->rpmb_wlu = NULL;
+ }
+
+ if (u->boot_wlu) {
+ object_unref(OBJECT(u->boot_wlu));
+ u->boot_wlu = NULL;
+ }
+
+ qemu_bh_delete(u->doorbell_bh);
+ qemu_bh_delete(u->complete_bh);
+
+ for (int i = 0; i < u->params.nutrs; i++) {
+ ufs_clear_req(&u->req_list[i]);
+ }
+ g_free(u->req_list);
+}
+
+static Property ufs_props[] = {
+ DEFINE_PROP_STRING("serial", UfsHc, params.serial),
+ DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
+ DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
+ DEFINE_PROP_END_OF_LIST(),
+};
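+
+/*
+ * A minimal, hypothetical invocation of this controller with a single
+ * logical unit (the "drive" and "lun" properties of ufs-lu are defined in
+ * lu.c) might look like:
+ *
+ * -drive file=ufs.img,if=none,id=drv0,format=raw
+ * -device ufs,serial=deadbeef,id=ufs0
+ * -device ufs-lu,drive=drv0,lun=0
+ */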
+
+static const VMStateDescription ufs_vmstate = {
+ .name = "ufs",
+ .unmigratable = 1,
+};
+
+static void ufs_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
+
+ pc->realize = ufs_realize;
+ pc->exit = ufs_exit;
+ pc->vendor_id = PCI_VENDOR_ID_REDHAT;
+ pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
+ pc->class_id = PCI_CLASS_STORAGE_UFS;
+
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->desc = "Universal Flash Storage";
+ device_class_set_props(dc, ufs_props);
+ dc->vmsd = &ufs_vmstate;
+}
+
+static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
+ Error **errp)
+{
+ SCSIDevice *dev = SCSI_DEVICE(qdev);
+ UfsBusClass *ubc = UFS_BUS_GET_CLASS(qbus);
+ UfsHc *u = UFS(qbus->parent);
+
+ if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_WLU) == 0) {
+ if (dev->lun != UFS_UPIU_REPORT_LUNS_WLUN &&
+ dev->lun != UFS_UPIU_UFS_DEVICE_WLUN &&
+ dev->lun != UFS_UPIU_BOOT_WLUN && dev->lun != UFS_UPIU_RPMB_WLUN) {
+ error_setg(errp, "bad well-known lun: %d", dev->lun);
+ return false;
+ }
+
+ if ((dev->lun == UFS_UPIU_REPORT_LUNS_WLUN && u->report_wlu != NULL) ||
+ (dev->lun == UFS_UPIU_UFS_DEVICE_WLUN && u->dev_wlu != NULL) ||
+ (dev->lun == UFS_UPIU_BOOT_WLUN && u->boot_wlu != NULL) ||
+ (dev->lun == UFS_UPIU_RPMB_WLUN && u->rpmb_wlu != NULL)) {
+ error_setg(errp, "well-known lun %d already exists", dev->lun);
+ return false;
+ }
+
+ return true;
+ }
+
+ if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_LU) != 0) {
+ error_setg(errp, "%s cannot be connected to ufs-bus",
+ object_get_typename(OBJECT(dev)));
+ return false;
+ }
+
+ return ubc->parent_check_address(qbus, qdev, errp);
+}
+
+static void ufs_bus_class_init(ObjectClass *class, void *data)
+{
+ BusClass *bc = BUS_CLASS(class);
+ UfsBusClass *ubc = UFS_BUS_CLASS(class);
+ ubc->parent_check_address = bc->check_address;
+ bc->check_address = ufs_bus_check_address;
+}
+
+static const TypeInfo ufs_info = {
+ .name = TYPE_UFS,
+ .parent = TYPE_PCI_DEVICE,
+ .class_init = ufs_class_init,
+ .instance_size = sizeof(UfsHc),
+ .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
+};
+
+static const TypeInfo ufs_bus_info = {
+ .name = TYPE_UFS_BUS,
+ .parent = TYPE_SCSI_BUS,
+ .class_init = ufs_bus_class_init,
+ .class_size = sizeof(UfsBusClass),
+ .instance_size = sizeof(UfsBus),
+};
+
+static void ufs_register_types(void)
+{
+ type_register_static(&ufs_info);
+ type_register_static(&ufs_bus_info);
+}
+
+type_init(ufs_register_types)
diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h
new file mode 100644
index 0000000..f244228
--- /dev/null
+++ b/hw/ufs/ufs.h
@@ -0,0 +1,131 @@
+/*
+ * QEMU UFS
+ *
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * Written by Jeuk Kim <jeuk20.kim@samsung.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_UFS_UFS_H
+#define HW_UFS_UFS_H
+
+#include "hw/pci/pci_device.h"
+#include "hw/scsi/scsi.h"
+#include "block/ufs.h"
+
+#define UFS_MAX_LUS 32
+#define UFS_BLOCK_SIZE 4096
+
+typedef struct UfsBusClass {
+ BusClass parent_class;
+ bool (*parent_check_address)(BusState *bus, DeviceState *dev, Error **errp);
+} UfsBusClass;
+
+typedef struct UfsBus {
+ SCSIBus parent_bus;
+} UfsBus;
+
+#define TYPE_UFS_BUS "ufs-bus"
+DECLARE_OBJ_CHECKERS(UfsBus, UfsBusClass, UFS_BUS, TYPE_UFS_BUS)
+
+typedef enum UfsRequestState {
+ UFS_REQUEST_IDLE = 0,
+ UFS_REQUEST_READY = 1,
+ UFS_REQUEST_RUNNING = 2,
+ UFS_REQUEST_COMPLETE = 3,
+ UFS_REQUEST_ERROR = 4,
+} UfsRequestState;
+
+typedef enum UfsReqResult {
+ UFS_REQUEST_SUCCESS = 0,
+ UFS_REQUEST_FAIL = 1,
+ UFS_REQUEST_NO_COMPLETE = 2,
+} UfsReqResult;
+
+typedef struct UfsRequest {
+ struct UfsHc *hc;
+ UfsRequestState state;
+ int slot;
+
+ UtpTransferReqDesc utrd;
+ UtpUpiuReq req_upiu;
+ UtpUpiuRsp rsp_upiu;
+
+ /* for scsi command */
+ QEMUSGList *sg;
+} UfsRequest;
+
+typedef struct UfsLu {
+ SCSIDevice qdev;
+ uint8_t lun;
+ UnitDescriptor unit_desc;
+} UfsLu;
+
+typedef struct UfsWLu {
+ SCSIDevice qdev;
+ uint8_t lun;
+} UfsWLu;
+
+typedef struct UfsParams {
+ char *serial;
+ uint8_t nutrs; /* Number of UTP Transfer Request Slots */
+ uint8_t nutmrs; /* Number of UTP Task Management Request Slots */
+} UfsParams;
+
+typedef struct UfsHc {
+ PCIDevice parent_obj;
+ UfsBus bus;
+ MemoryRegion iomem;
+ UfsReg reg;
+ UfsParams params;
+ uint32_t reg_size;
+ UfsRequest *req_list;
+
+ UfsLu *lus[UFS_MAX_LUS];
+ UfsWLu *report_wlu;
+ UfsWLu *dev_wlu;
+ UfsWLu *boot_wlu;
+ UfsWLu *rpmb_wlu;
+ DeviceDescriptor device_desc;
+ GeometryDescriptor geometry_desc;
+ Attributes attributes;
+ Flags flags;
+
+ qemu_irq irq;
+ QEMUBH *doorbell_bh;
+ QEMUBH *complete_bh;
+} UfsHc;
+
+#define TYPE_UFS "ufs"
+#define UFS(obj) OBJECT_CHECK(UfsHc, (obj), TYPE_UFS)
+
+#define TYPE_UFS_LU "ufs-lu"
+#define UFSLU(obj) OBJECT_CHECK(UfsLu, (obj), TYPE_UFS_LU)
+
+#define TYPE_UFS_WLU "ufs-wlu"
+#define UFSWLU(obj) OBJECT_CHECK(UfsWLu, (obj), TYPE_UFS_WLU)
+
+typedef enum UfsQueryFlagPerm {
+ UFS_QUERY_FLAG_NONE = 0x0,
+ UFS_QUERY_FLAG_READ = 0x1,
+ UFS_QUERY_FLAG_SET = 0x2,
+ UFS_QUERY_FLAG_CLEAR = 0x4,
+ UFS_QUERY_FLAG_TOGGLE = 0x8,
+} UfsQueryFlagPerm;
+
+typedef enum UfsQueryAttrPerm {
+ UFS_QUERY_ATTR_NONE = 0x0,
+ UFS_QUERY_ATTR_READ = 0x1,
+ UFS_QUERY_ATTR_WRITE = 0x2,
+} UfsQueryAttrPerm;
+
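+/*
+ * Well-known lus are addressed with bit 7 of the lun set (e.g. 0x81 for
+ * REPORT LUNS), so they never collide with the UFS_MAX_LUS normal lus.
+ */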
+static inline bool is_wlun(uint8_t lun)
+{
+ return (lun == UFS_UPIU_REPORT_LUNS_WLUN ||
+ lun == UFS_UPIU_UFS_DEVICE_WLUN || lun == UFS_UPIU_BOOT_WLUN ||
+ lun == UFS_UPIU_RPMB_WLUN);
+}
+
+#endif /* HW_UFS_UFS_H */