author     Peter Maydell <peter.maydell@linaro.org>   2024-02-13 19:46:17 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2024-02-13 19:46:17 +0000
commit     62357c047a5abc6ede992159ed7c0aaaeb50617a (patch)
tree       6932920f625c8818a6d6e10828d02a142ab0200b
parent     bc2e8b18fba33f30f25b7c2d74328493c0a2231d (diff)
parent     78d68f312aec905edcb1dc5549a2c3a6f716605a (diff)
Merge tag 'qemu-sparc-20240213' of https://github.com/mcayland/qemu into staging
qemu-sparc queue

# -----BEGIN PGP SIGNATURE-----
#
# iQFSBAABCgA8FiEEzGIauY6CIA2RXMnEW8LFb64PMh8FAmXLxQweHG1hcmsuY2F2
# ZS1heWxhbmRAaWxhbmRlLmNvLnVrAAoJEFvCxW+uDzIfn3UH/2blaWblrlMBQlGQ
# fkQOI2IGCJ5yRuh70roTY2aPnUyfc70IvZMvYtHElRD0UqYaQgxSjBbnmsqdS+9c
# IKJG3qlDbnu0GBKKpxw9pmtHJ5NsaAl9E9jLZEX6ISu2rWrBHt4XisZhz8U5cVuc
# dmlM4onk2F3+UcfGh4ACPNwtbYqQHEfWwsLuYPdyDdI647Vs6fEgIjeixBi3BcpN
# lzyzquu/AB5SMXRnKaP5CUHC01TM/US2HuZfZ4PzyA0CmIi1od4RHE1iEN7JNWyC
# ki/dasFoELfeoEU/6JrfPOx65v+91hhkBzN+oC4eV3r5COQkmW7PTmlqS269sH5w
# SZsOWcM=
# =T2mw
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 13 Feb 2024 19:37:48 GMT
# gpg: using RSA key CC621AB98E82200D915CC9C45BC2C56FAE0F321F
# gpg: issuer "mark.cave-ayland@ilande.co.uk"
# gpg: Good signature from "Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>" [full]
# Primary key fingerprint: CC62 1AB9 8E82 200D 915C C9C4 5BC2 C56F AE0F 321F

* tag 'qemu-sparc-20240213' of https://github.com/mcayland/qemu: (88 commits)
  esp.c: add my copyright to the file
  esp.c: switch TypeInfo registration to use DEFINE_TYPES() macro
  esp.c: keep track of the DRQ state during DMA
  esp.c: rename irq_data IRQ to drq_irq
  esp.c: implement DMA Transfer Pad command for DATA phases
  esp.c: replace n variable with len in esp_do_nodma()
  esp.c: consolidate DMA and PDMA logic in STATUS and MESSAGE IN phases
  esp.c: remove redundant n variable in PDMA COMMAND phase
  esp.c: consolidate DMA and PDMA logic in MESSAGE OUT phase
  esp.c: consolidate DMA and PDMA logic in DATA IN phase
  esp.c: consolidate DMA and PDMA logic in DATA OUT phase
  esp.c: only transfer non-DMA MESSAGE OUT phase data for specific commands
  esp.c: only transfer non-DMA COMMAND phase data for specific commands
  esp.c: improve ESP_RSEQ logic consolidation
  esp.c: handle non-DMA FIFO writes used to terminate DMA commands
  esp.c: remove restriction on FIFO read access when DMA memory routines defined
  esp.c: handle TC underflow for DMA SCSI requests
  esp.c: don't clear the SCSI phase when reading ESP_RINTR
  esp.c: ensure that STAT_INT is cleared when reading ESP_RINTR
  esp.c: consolidate end of command sequence after ICCS command
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  hw/scsi/esp.c          | 1361
-rw-r--r--  hw/scsi/trace-events   |    1
-rw-r--r--  include/hw/scsi/esp.h  |   18
3 files changed, 722 insertions, 658 deletions
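
A recurring theme of the series is that the DRQ line is now edge-tracked: esp_raise_drq() and esp_lower_drq() only touch the qemu_irq when the level actually changes, caching the current level in the new drq_state field. The fragment below is a minimal standalone sketch of that pattern in plain C with hypothetical demo_* names; it is an illustration, not part of the patch.

/* Sketch: edge-tracked request line, driven only on a real level change. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool drq_state;             /* last level driven onto the DRQ line */
} DemoState;

static void demo_drive_line(bool level)
{
    /* Stand-in for qemu_irq_raise()/qemu_irq_lower() on the real device */
    printf("DRQ -> %d\n", level);
}

static void demo_raise_drq(DemoState *s)
{
    if (!s->drq_state) {        /* act only on a real 0 -> 1 edge */
        demo_drive_line(true);
        s->drq_state = true;
    }
}

static void demo_lower_drq(DemoState *s)
{
    if (s->drq_state) {         /* act only on a real 1 -> 0 edge */
        demo_drive_line(false);
        s->drq_state = false;
    }
}

int main(void)
{
    DemoState s = { .drq_state = false };
    demo_raise_drq(&s);         /* drives the line high */
    demo_raise_drq(&s);         /* no-op: already high */
    demo_lower_drq(&s);         /* drives the line low */
    return 0;
}
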
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 3a1c9f7..590ff99 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2005-2006 Fabrice Bellard
* Copyright (c) 2012 Herve Poussineau
+ * Copyright (c) 2023 Mark Cave-Ayland
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -62,14 +63,20 @@ static void esp_lower_irq(ESPState *s)
static void esp_raise_drq(ESPState *s)
{
- qemu_irq_raise(s->irq_data);
- trace_esp_raise_drq();
+ if (!(s->drq_state)) {
+ qemu_irq_raise(s->drq_irq);
+ trace_esp_raise_drq();
+ s->drq_state = true;
+ }
}
static void esp_lower_drq(ESPState *s)
{
- qemu_irq_lower(s->irq_data);
- trace_esp_lower_drq();
+ if (s->drq_state) {
+ qemu_irq_lower(s->drq_irq);
+ trace_esp_lower_drq();
+ s->drq_state = false;
+ }
}
void esp_dma_enable(ESPState *s, int irq, int level)
@@ -121,17 +128,30 @@ static uint8_t esp_fifo_pop(Fifo8 *fifo)
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
const uint8_t *buf;
- uint32_t n;
+ uint32_t n, n2;
+ int len;
if (maxlen == 0) {
return 0;
}
- buf = fifo8_pop_buf(fifo, maxlen, &n);
+ len = maxlen;
+ buf = fifo8_pop_buf(fifo, len, &n);
if (dest) {
memcpy(dest, buf, n);
}
+ /* Add FIFO wraparound if needed */
+ len -= n;
+ len = MIN(len, fifo8_num_used(fifo));
+ if (len) {
+ buf = fifo8_pop_buf(fifo, len, &n2);
+ if (dest) {
+ memcpy(&dest[n], buf, n2);
+ }
+ n += n2;
+ }
+
return n;
}
@@ -148,9 +168,15 @@ static uint32_t esp_get_tc(ESPState *s)
static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
+ uint32_t old_tc = esp_get_tc(s);
+
s->rregs[ESP_TCLO] = dmalen;
s->rregs[ESP_TCMID] = dmalen >> 8;
s->rregs[ESP_TCHI] = dmalen >> 16;
+
+ if (old_tc && dmalen == 0) {
+ s->rregs[ESP_RSTAT] |= STAT_TC;
+ }
}
static uint32_t esp_get_stc(ESPState *s)
@@ -164,16 +190,29 @@ static uint32_t esp_get_stc(ESPState *s)
return dmalen;
}
+static const char *esp_phase_names[8] = {
+ "DATA OUT", "DATA IN", "COMMAND", "STATUS",
+ "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
+};
+
+static void esp_set_phase(ESPState *s, uint8_t phase)
+{
+ s->rregs[ESP_RSTAT] &= ~7;
+ s->rregs[ESP_RSTAT] |= phase;
+
+ trace_esp_set_phase(esp_phase_names[phase]);
+}
+
+static uint8_t esp_get_phase(ESPState *s)
+{
+ return s->rregs[ESP_RSTAT] & 7;
+}
+
static uint8_t esp_pdma_read(ESPState *s)
{
uint8_t val;
- if (s->do_cmd) {
- val = esp_fifo_pop(&s->cmdfifo);
- } else {
- val = esp_fifo_pop(&s->fifo);
- }
-
+ val = esp_fifo_pop(&s->fifo);
return val;
}
@@ -185,21 +224,12 @@ static void esp_pdma_write(ESPState *s, uint8_t val)
return;
}
- if (s->do_cmd) {
- esp_fifo_push(&s->cmdfifo, val);
- } else {
- esp_fifo_push(&s->fifo, val);
- }
+ esp_fifo_push(&s->fifo, val);
dmalen--;
esp_set_tc(s, dmalen);
}
-static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
-{
- s->pdma_cb = cb;
-}
-
static int esp_select(ESPState *s)
{
int target;
@@ -207,75 +237,31 @@ static int esp_select(ESPState *s)
target = s->wregs[ESP_WBUSID] & BUSID_DID;
s->ti_size = 0;
- fifo8_reset(&s->fifo);
+ s->rregs[ESP_RSEQ] = SEQ_0;
+
+ if (s->current_req) {
+ /* Started a new command before the old one finished. Cancel it. */
+ scsi_req_cancel(s->current_req);
+ }
s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
if (!s->current_dev) {
/* No such drive */
s->rregs[ESP_RSTAT] = 0;
s->rregs[ESP_RINTR] = INTR_DC;
- s->rregs[ESP_RSEQ] = SEQ_0;
esp_raise_irq(s);
return -1;
}
/*
* Note that we deliberately don't raise the IRQ here: this will be done
- * either in do_command_phase() for DATA OUT transfers or by the deferred
- * IRQ mechanism in esp_transfer_data() for DATA IN transfers
+ * either in esp_transfer_data() or esp_command_complete()
*/
- s->rregs[ESP_RINTR] |= INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
return 0;
}
-static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
-{
- uint8_t buf[ESP_CMDFIFO_SZ];
- uint32_t dmalen, n;
- int target;
-
- if (s->current_req) {
- /* Started a new command before the old one finished. Cancel it. */
- scsi_req_cancel(s->current_req);
- }
-
- target = s->wregs[ESP_WBUSID] & BUSID_DID;
- if (s->dma) {
- dmalen = MIN(esp_get_tc(s), maxlen);
- if (dmalen == 0) {
- return 0;
- }
- if (s->dma_memory_read) {
- s->dma_memory_read(s->dma_opaque, buf, dmalen);
- dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
- fifo8_push_all(&s->cmdfifo, buf, dmalen);
- } else {
- if (esp_select(s) < 0) {
- fifo8_reset(&s->cmdfifo);
- return -1;
- }
- esp_raise_drq(s);
- fifo8_reset(&s->cmdfifo);
- return 0;
- }
- } else {
- dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
- if (dmalen == 0) {
- return 0;
- }
- n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
- n = MIN(fifo8_num_free(&s->cmdfifo), n);
- fifo8_push_all(&s->cmdfifo, buf, n);
- }
- trace_esp_get_cmd(dmalen, target);
-
- if (esp_select(s) < 0) {
- fifo8_reset(&s->cmdfifo);
- return -1;
- }
- return dmalen;
-}
+static void esp_do_dma(ESPState *s);
+static void esp_do_nodma(ESPState *s);
static void do_command_phase(ESPState *s)
{
@@ -305,23 +291,16 @@ static void do_command_phase(ESPState *s)
datalen = scsi_req_enqueue(s->current_req);
s->ti_size = datalen;
fifo8_reset(&s->cmdfifo);
+ s->data_ready = false;
if (datalen != 0) {
- s->rregs[ESP_RSTAT] = STAT_TC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- s->ti_cmd = 0;
- esp_set_tc(s, 0);
+ /*
+ * Switch to DATA phase but wait until initial data xfer is
+ * complete before raising the command completion interrupt
+ */
if (datalen > 0) {
- /*
- * Switch to DATA IN phase but wait until initial data xfer is
- * complete before raising the command completion interrupt
- */
- s->data_in_ready = false;
- s->rregs[ESP_RSTAT] |= STAT_DI;
+ esp_set_phase(s, STAT_DI);
} else {
- s->rregs[ESP_RSTAT] |= STAT_DO;
- s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
- esp_raise_irq(s);
- esp_lower_drq(s);
+ esp_set_phase(s, STAT_DO);
}
scsi_req_continue(s->current_req);
return;
@@ -353,472 +332,576 @@ static void do_cmd(ESPState *s)
do_command_phase(s);
}
-static void satn_pdma_cb(ESPState *s)
-{
- if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
- s->cmdfifo_cdb_offset = 1;
- s->do_cmd = 0;
- do_cmd(s);
- }
-}
-
static void handle_satn(ESPState *s)
{
- int32_t cmdlen;
-
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn;
return;
}
- esp_set_pdma_cb(s, SATN_PDMA_CB);
- cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
- if (cmdlen > 0) {
- s->cmdfifo_cdb_offset = 1;
- s->do_cmd = 0;
- do_cmd(s);
- } else if (cmdlen == 0) {
- s->do_cmd = 1;
- /* Target present, but no cmd yet - switch to command phase */
- s->rregs[ESP_RSEQ] = SEQ_CD;
- s->rregs[ESP_RSTAT] = STAT_CD;
+
+ if (esp_select(s) < 0) {
+ return;
}
-}
-static void s_without_satn_pdma_cb(ESPState *s)
-{
- if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
- s->cmdfifo_cdb_offset = 0;
- s->do_cmd = 0;
- do_cmd(s);
+ esp_set_phase(s, STAT_MO);
+
+ if (s->dma) {
+ esp_do_dma(s);
+ } else {
+ esp_do_nodma(s);
}
}
static void handle_s_without_atn(ESPState *s)
{
- int32_t cmdlen;
-
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_s_without_atn;
return;
}
- esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
- cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
- if (cmdlen > 0) {
- s->cmdfifo_cdb_offset = 0;
- s->do_cmd = 0;
- do_cmd(s);
- } else if (cmdlen == 0) {
- s->do_cmd = 1;
- /* Target present, but no cmd yet - switch to command phase */
- s->rregs[ESP_RSEQ] = SEQ_CD;
- s->rregs[ESP_RSTAT] = STAT_CD;
+
+ if (esp_select(s) < 0) {
+ return;
}
-}
-static void satn_stop_pdma_cb(ESPState *s)
-{
- if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
- trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
- s->do_cmd = 1;
- s->cmdfifo_cdb_offset = 1;
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- esp_raise_irq(s);
+ esp_set_phase(s, STAT_CD);
+ s->cmdfifo_cdb_offset = 0;
+
+ if (s->dma) {
+ esp_do_dma(s);
+ } else {
+ esp_do_nodma(s);
}
}
static void handle_satn_stop(ESPState *s)
{
- int32_t cmdlen;
-
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn_stop;
return;
}
- esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
- cmdlen = get_cmd(s, 1);
- if (cmdlen > 0) {
- trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
- s->do_cmd = 1;
- s->cmdfifo_cdb_offset = 1;
- s->rregs[ESP_RSTAT] = STAT_MO;
- s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_MO;
- esp_raise_irq(s);
- } else if (cmdlen == 0) {
- s->do_cmd = 1;
- /* Target present, switch to message out phase */
- s->rregs[ESP_RSEQ] = SEQ_MO;
- s->rregs[ESP_RSTAT] = STAT_MO;
+
+ if (esp_select(s) < 0) {
+ return;
+ }
+
+ esp_set_phase(s, STAT_MO);
+ s->cmdfifo_cdb_offset = 0;
+
+ if (s->dma) {
+ esp_do_dma(s);
+ } else {
+ esp_do_nodma(s);
}
}
-static void write_response_pdma_cb(ESPState *s)
+static void handle_pad(ESPState *s)
{
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- esp_raise_irq(s);
+ if (s->dma) {
+ esp_do_dma(s);
+ } else {
+ esp_do_nodma(s);
+ }
}
static void write_response(ESPState *s)
{
- uint8_t buf[2];
-
trace_esp_write_response(s->status);
- buf[0] = s->status;
- buf[1] = 0;
-
if (s->dma) {
- if (s->dma_memory_write) {
- s->dma_memory_write(s->dma_opaque, buf, 2);
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- } else {
- esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
- esp_raise_drq(s);
- return;
- }
+ esp_do_dma(s);
} else {
- fifo8_reset(&s->fifo);
- fifo8_push_all(&s->fifo, buf, 2);
- s->rregs[ESP_RFLAGS] = 2;
+ esp_do_nodma(s);
}
- esp_raise_irq(s);
}
-static void esp_dma_done(ESPState *s)
+static int esp_cdb_length(ESPState *s)
{
- s->rregs[ESP_RSTAT] |= STAT_TC;
- s->rregs[ESP_RINTR] |= INTR_BS;
- s->rregs[ESP_RFLAGS] = 0;
- esp_set_tc(s, 0);
- esp_raise_irq(s);
+ const uint8_t *pbuf;
+ int cmdlen, len;
+
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ if (cmdlen < s->cmdfifo_cdb_offset) {
+ return 0;
+ }
+
+ pbuf = fifo8_peek_buf(&s->cmdfifo, cmdlen, NULL);
+ len = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);
+
+ return len;
}
-static void do_dma_pdma_cb(ESPState *s)
+static void esp_dma_ti_check(ESPState *s)
{
- int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
- int len;
- uint32_t n;
+ if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ esp_lower_drq(s);
+ }
+}
- if (s->do_cmd) {
- /* Ensure we have received complete command after SATN and stop */
- if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
- return;
- }
+static void esp_do_dma(ESPState *s)
+{
+ uint32_t len, cmdlen;
+ uint8_t buf[ESP_CMDFIFO_SZ];
- s->ti_size = 0;
- if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
- /* No command received */
- if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
- return;
- }
+ len = esp_get_tc(s);
- /* Command has been received */
- s->do_cmd = 0;
- do_cmd(s);
+ switch (esp_get_phase(s)) {
+ case STAT_MO:
+ if (s->dma_memory_read) {
+ len = MIN(len, fifo8_num_free(&s->cmdfifo));
+ s->dma_memory_read(s->dma_opaque, buf, len);
+ esp_set_tc(s, esp_get_tc(s) - len);
} else {
- /*
- * Extra message out bytes received: update cmdfifo_cdb_offset
- * and then switch to command phase
- */
- s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- s->rregs[ESP_RINTR] |= INTR_BS;
- esp_raise_irq(s);
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ esp_raise_drq(s);
}
- return;
- }
- if (!s->current_req) {
- return;
- }
-
- if (to_device) {
- /* Copy FIFO data to device */
- len = MIN(s->async_len, ESP_FIFO_SZ);
- len = MIN(len, fifo8_num_used(&s->fifo));
- n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
- s->async_buf += n;
- s->async_len -= n;
- s->ti_size += n;
-
- if (n < len) {
- /* Unaligned accesses can cause FIFO wraparound */
- len = len - n;
- n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
- s->async_buf += n;
- s->async_len -= n;
- s->ti_size += n;
- }
+ fifo8_push_all(&s->cmdfifo, buf, len);
+ s->cmdfifo_cdb_offset += len;
- if (s->async_len == 0) {
- scsi_req_continue(s->current_req);
- return;
- }
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_SELATN | CMD_DMA:
+ if (fifo8_num_used(&s->cmdfifo) >= 1) {
+ /* First byte received, switch to command phase */
+ esp_set_phase(s, STAT_CD);
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->cmdfifo_cdb_offset = 1;
- if (esp_get_tc(s) == 0) {
- esp_lower_drq(s);
- esp_dma_done(s);
- }
+ if (fifo8_num_used(&s->cmdfifo) > 1) {
+ /* Process any additional command phase data */
+ esp_do_dma(s);
+ }
+ }
+ break;
- return;
- } else {
- if (s->async_len == 0) {
- /* Defer until the scsi layer has completed */
- scsi_req_continue(s->current_req);
- s->data_in_ready = false;
- return;
- }
+ case CMD_SELATNS | CMD_DMA:
+ if (fifo8_num_used(&s->cmdfifo) == 1) {
+ /* First byte received, stop in message out phase */
+ s->rregs[ESP_RSEQ] = SEQ_MO;
+ s->cmdfifo_cdb_offset = 1;
- if (esp_get_tc(s) != 0) {
- /* Copy device data to FIFO */
- len = MIN(s->async_len, esp_get_tc(s));
- len = MIN(len, fifo8_num_free(&s->fifo));
- fifo8_push_all(&s->fifo, s->async_buf, len);
- s->async_buf += len;
- s->async_len -= len;
- s->ti_size -= len;
- esp_set_tc(s, esp_get_tc(s) - len);
+ /* Raise command completion interrupt */
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ esp_raise_irq(s);
+ }
+ break;
+ case CMD_TI | CMD_DMA:
+ /* ATN remains asserted until TC == 0 */
if (esp_get_tc(s) == 0) {
- /* Indicate transfer to FIFO is complete */
- s->rregs[ESP_RSTAT] |= STAT_TC;
+ esp_set_phase(s, STAT_CD);
+ s->rregs[ESP_CMD] = 0;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
}
- return;
+ break;
}
+ break;
- /* Partially filled a scsi buffer. Complete immediately. */
- esp_lower_drq(s);
- esp_dma_done(s);
- }
-}
-
-static void esp_do_dma(ESPState *s)
-{
- uint32_t len, cmdlen;
- int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
- uint8_t buf[ESP_CMDFIFO_SZ];
-
- len = esp_get_tc(s);
- if (s->do_cmd) {
- /*
- * handle_ti_cmd() case: esp_do_dma() is called only from
- * handle_ti_cmd() with do_cmd != NULL (see the assert())
- */
+ case STAT_CD:
cmdlen = fifo8_num_used(&s->cmdfifo);
trace_esp_do_dma(cmdlen, len);
if (s->dma_memory_read) {
len = MIN(len, fifo8_num_free(&s->cmdfifo));
s->dma_memory_read(s->dma_opaque, buf, len);
fifo8_push_all(&s->cmdfifo, buf, len);
+ esp_set_tc(s, esp_get_tc(s) - len);
} else {
- esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
esp_raise_drq(s);
- return;
}
trace_esp_handle_ti_cmd(cmdlen);
s->ti_size = 0;
- if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
- /* No command received */
- if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
- return;
- }
-
+ if (esp_get_tc(s) == 0) {
/* Command has been received */
- s->do_cmd = 0;
do_cmd(s);
- } else {
- /*
- * Extra message out bytes received: update cmdfifo_cdb_offset
- * and then switch to command phase
- */
- s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- s->rregs[ESP_RINTR] |= INTR_BS;
- esp_raise_irq(s);
}
- return;
- }
- if (!s->current_req) {
- return;
- }
- if (s->async_len == 0) {
- /* Defer until data is available. */
- return;
- }
- if (len > s->async_len) {
- len = s->async_len;
- }
- if (to_device) {
- if (s->dma_memory_read) {
- s->dma_memory_read(s->dma_opaque, s->async_buf, len);
- } else {
- esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
- esp_raise_drq(s);
+ break;
+
+ case STAT_DO:
+ if (!s->current_req) {
return;
}
- } else {
- if (s->dma_memory_write) {
- s->dma_memory_write(s->dma_opaque, s->async_buf, len);
- } else {
- /* Adjust TC for any leftover data in the FIFO */
- if (!fifo8_is_empty(&s->fifo)) {
- esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
+ if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
+ /* Defer until data is available. */
+ return;
+ }
+ if (len > s->async_len) {
+ len = s->async_len;
+ }
+
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_TI | CMD_DMA:
+ if (s->dma_memory_read) {
+ s->dma_memory_read(s->dma_opaque, s->async_buf, len);
+ esp_set_tc(s, esp_get_tc(s) - len);
+ } else {
+ /* Copy FIFO data to device */
+ len = MIN(s->async_len, ESP_FIFO_SZ);
+ len = MIN(len, fifo8_num_used(&s->fifo));
+ len = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
+ esp_raise_drq(s);
+ }
+
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size += len;
+ break;
+
+ case CMD_PAD | CMD_DMA:
+ /* Copy TC zero bytes into the incoming stream */
+ if (!s->dma_memory_read) {
+ len = MIN(s->async_len, ESP_FIFO_SZ);
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ }
+
+ memset(s->async_buf, 0, len);
+
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size += len;
+ break;
+ }
+
+ if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
+ /* Defer until the scsi layer has completed */
+ scsi_req_continue(s->current_req);
+ return;
+ }
+
+ esp_dma_ti_check(s);
+ break;
+
+ case STAT_DI:
+ if (!s->current_req) {
+ return;
+ }
+ if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
+ /* Defer until data is available. */
+ return;
+ }
+ if (len > s->async_len) {
+ len = s->async_len;
+ }
+
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_TI | CMD_DMA:
+ if (s->dma_memory_write) {
+ s->dma_memory_write(s->dma_opaque, s->async_buf, len);
+ } else {
+ /* Copy device data to FIFO */
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ esp_raise_drq(s);
}
- /* Copy device data to FIFO */
- len = MIN(len, fifo8_num_free(&s->fifo));
- fifo8_push_all(&s->fifo, s->async_buf, len);
s->async_buf += len;
s->async_len -= len;
s->ti_size -= len;
+ esp_set_tc(s, esp_get_tc(s) - len);
+ break;
- /*
- * MacOS toolbox uses a TI length of 16 bytes for all commands, so
- * commands shorter than this must be padded accordingly
- */
- if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
- while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
- esp_fifo_push(&s->fifo, 0);
- len++;
- }
+ case CMD_PAD | CMD_DMA:
+ /* Drop TC bytes from the incoming stream */
+ if (!s->dma_memory_write) {
+ len = MIN(len, fifo8_num_free(&s->fifo));
}
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
esp_set_tc(s, esp_get_tc(s) - len);
- esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
- esp_raise_drq(s);
+ break;
+ }
- /* Indicate transfer to FIFO is complete */
- s->rregs[ESP_RSTAT] |= STAT_TC;
+ if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
+ /* If the guest underflows TC then terminate SCSI request */
+ scsi_req_continue(s->current_req);
return;
}
+
+ if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
+ /* Defer until the scsi layer has completed */
+ scsi_req_continue(s->current_req);
+ return;
+ }
+
+ esp_dma_ti_check(s);
+ break;
+
+ case STAT_ST:
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_ICCS | CMD_DMA:
+ len = MIN(len, 1);
+
+ if (len) {
+ buf[0] = s->status;
+
+ if (s->dma_memory_write) {
+ s->dma_memory_write(s->dma_opaque, buf, len);
+ } else {
+ fifo8_push_all(&s->fifo, buf, len);
+ }
+
+ esp_set_tc(s, esp_get_tc(s) - len);
+ esp_set_phase(s, STAT_MI);
+
+ if (esp_get_tc(s) > 0) {
+ /* Process any message in phase data */
+ esp_do_dma(s);
+ }
+ }
+ break;
+
+ default:
+ /* Consume remaining data if the guest underflows TC */
+ if (fifo8_num_used(&s->fifo) < 2) {
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ esp_lower_drq(s);
+ }
+ break;
+ }
+ break;
+
+ case STAT_MI:
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_ICCS | CMD_DMA:
+ len = MIN(len, 1);
+
+ if (len) {
+ buf[0] = 0;
+
+ if (s->dma_memory_write) {
+ s->dma_memory_write(s->dma_opaque, buf, len);
+ } else {
+ fifo8_push_all(&s->fifo, buf, len);
+ }
+
+ esp_set_tc(s, esp_get_tc(s) - len);
+
+ /* Raise end of command interrupt */
+ s->rregs[ESP_RINTR] |= INTR_FC;
+ esp_raise_irq(s);
+ }
+ break;
+ }
+ break;
}
- esp_set_tc(s, esp_get_tc(s) - len);
+}
+
+static void esp_nodma_ti_dataout(ESPState *s)
+{
+ int len;
+
+ if (!s->current_req) {
+ return;
+ }
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
+ }
+ len = MIN(s->async_len, ESP_FIFO_SZ);
+ len = MIN(len, fifo8_num_used(&s->fifo));
+ esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
s->async_buf += len;
s->async_len -= len;
- if (to_device) {
- s->ti_size += len;
- } else {
- s->ti_size -= len;
- }
+ s->ti_size += len;
+
if (s->async_len == 0) {
scsi_req_continue(s->current_req);
- /*
- * If there is still data to be read from the device then
- * complete the DMA operation immediately. Otherwise defer
- * until the scsi layer has completed.
- */
- if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
- return;
- }
+ return;
}
- /* Partially filled a scsi buffer. Complete immediately. */
- esp_dma_done(s);
- esp_lower_drq(s);
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
}
static void esp_do_nodma(ESPState *s)
{
- int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint8_t buf[ESP_FIFO_SZ];
uint32_t cmdlen;
int len;
- if (s->do_cmd) {
- cmdlen = fifo8_num_used(&s->cmdfifo);
- trace_esp_handle_ti_cmd(cmdlen);
- s->ti_size = 0;
- if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
- /* No command received */
- if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
- return;
+ switch (esp_get_phase(s)) {
+ case STAT_MO:
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_SELATN:
+ /* Copy FIFO into cmdfifo */
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
+
+ if (fifo8_num_used(&s->cmdfifo) >= 1) {
+ /* First byte received, switch to command phase */
+ esp_set_phase(s, STAT_CD);
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->cmdfifo_cdb_offset = 1;
+
+ if (fifo8_num_used(&s->cmdfifo) > 1) {
+ /* Process any additional command phase data */
+ esp_do_nodma(s);
+ }
}
+ break;
- /* Command has been received */
- s->do_cmd = 0;
- do_cmd(s);
- } else {
- /*
- * Extra message out bytes received: update cmdfifo_cdb_offset
- * and then switch to command phase
- */
+ case CMD_SELATNS:
+ /* Copy one byte from FIFO into cmdfifo */
+ len = esp_fifo_pop_buf(&s->fifo, buf, 1);
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
+
+ if (fifo8_num_used(&s->cmdfifo) >= 1) {
+ /* First byte received, stop in message out phase */
+ s->rregs[ESP_RSEQ] = SEQ_MO;
+ s->cmdfifo_cdb_offset = 1;
+
+ /* Raise command completion interrupt */
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ esp_raise_irq(s);
+ }
+ break;
+
+ case CMD_TI:
+ /* Copy FIFO into cmdfifo */
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
+
+ /* ATN remains asserted until FIFO empty */
s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RSEQ] = SEQ_CD;
+ esp_set_phase(s, STAT_CD);
+ s->rregs[ESP_CMD] = 0;
s->rregs[ESP_RINTR] |= INTR_BS;
esp_raise_irq(s);
+ break;
}
- return;
- }
+ break;
- if (!s->current_req) {
- return;
- }
+ case STAT_CD:
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_TI:
+ /* Copy FIFO into cmdfifo */
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
- if (s->async_len == 0) {
- /* Defer until data is available. */
- return;
- }
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ trace_esp_handle_ti_cmd(cmdlen);
- if (to_device) {
- len = MIN(s->async_len, ESP_FIFO_SZ);
- len = MIN(len, fifo8_num_used(&s->fifo));
- esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
- s->async_buf += len;
- s->async_len -= len;
- s->ti_size += len;
- } else {
+ /* CDB may be transferred in one or more TI commands */
+ if (esp_cdb_length(s) && esp_cdb_length(s) ==
+ fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset) {
+ /* Command has been received */
+ do_cmd(s);
+ } else {
+ /*
+ * If data was transferred from the FIFO then raise bus
+ * service interrupt to indicate transfer complete. Otherwise
+ * defer until the next FIFO write.
+ */
+ if (len) {
+ /* Raise interrupt to indicate transfer complete */
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ }
+ }
+ break;
+
+ case CMD_SEL | CMD_DMA:
+ case CMD_SELATN | CMD_DMA:
+ /* Copy FIFO into cmdfifo */
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
+
+ /* Handle when DMA transfer is terminated by non-DMA FIFO write */
+ if (esp_cdb_length(s) && esp_cdb_length(s) ==
+ fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset) {
+ /* Command has been received */
+ do_cmd(s);
+ }
+ break;
+
+ case CMD_SEL:
+ case CMD_SELATN:
+ /* FIFO already contain entire CDB: copy to cmdfifo and execute */
+ len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
+ len = MIN(fifo8_num_free(&s->cmdfifo), len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
+
+ do_cmd(s);
+ break;
+ }
+ break;
+
+ case STAT_DO:
+ /* Accumulate data in FIFO until non-DMA TI is executed */
+ break;
+
+ case STAT_DI:
+ if (!s->current_req) {
+ return;
+ }
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
+ }
if (fifo8_is_empty(&s->fifo)) {
fifo8_push(&s->fifo, s->async_buf[0]);
s->async_buf++;
s->async_len--;
s->ti_size--;
}
- }
- if (s->async_len == 0) {
- scsi_req_continue(s->current_req);
- return;
- }
+ if (s->async_len == 0) {
+ scsi_req_continue(s->current_req);
+ return;
+ }
- s->rregs[ESP_RINTR] |= INTR_BS;
- esp_raise_irq(s);
-}
+ /* If preloading the FIFO, defer until TI command issued */
+ if (s->rregs[ESP_CMD] != CMD_TI) {
+ return;
+ }
-static void esp_pdma_cb(ESPState *s)
-{
- switch (s->pdma_cb) {
- case SATN_PDMA_CB:
- satn_pdma_cb(s);
- break;
- case S_WITHOUT_SATN_PDMA_CB:
- s_without_satn_pdma_cb(s);
- break;
- case SATN_STOP_PDMA_CB:
- satn_stop_pdma_cb(s);
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
break;
- case WRITE_RESPONSE_PDMA_CB:
- write_response_pdma_cb(s);
+
+ case STAT_ST:
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_ICCS:
+ fifo8_push(&s->fifo, s->status);
+ esp_set_phase(s, STAT_MI);
+
+ /* Process any message in phase data */
+ esp_do_nodma(s);
+ break;
+ }
break;
- case DO_DMA_PDMA_CB:
- do_dma_pdma_cb(s);
+
+ case STAT_MI:
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_ICCS:
+ fifo8_push(&s->fifo, 0);
+
+ /* Raise end of command interrupt */
+ s->rregs[ESP_RINTR] |= INTR_FC;
+ esp_raise_irq(s);
+ break;
+ }
break;
- default:
- g_assert_not_reached();
}
}
void esp_command_complete(SCSIRequest *req, size_t resid)
{
ESPState *s = req->hba_private;
- int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ int to_device = (esp_get_phase(s) == STAT_DO);
trace_esp_command_complete();
@@ -830,7 +913,6 @@ void esp_command_complete(SCSIRequest *req, size_t resid)
if (s->ti_size != 0) {
trace_esp_command_complete_unexpected();
}
- s->ti_size = 0;
}
s->async_len = 0;
@@ -840,15 +922,38 @@ void esp_command_complete(SCSIRequest *req, size_t resid)
s->status = req->status;
/*
- * If the transfer is finished, switch to status phase. For non-DMA
- * transfers from the target the last byte is still in the FIFO
+ * Switch to status phase. For non-DMA transfers from the target the last
+ * byte is still in the FIFO
*/
- if (s->ti_size == 0) {
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- esp_dma_done(s);
- esp_lower_drq(s);
+ s->ti_size = 0;
+
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_SEL | CMD_DMA:
+ case CMD_SEL:
+ case CMD_SELATN | CMD_DMA:
+ case CMD_SELATN:
+ /*
+ * No data phase for sequencer command so raise deferred bus service
+ * and function complete interrupt
+ */
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ break;
+
+ case CMD_TI | CMD_DMA:
+ case CMD_TI:
+ s->rregs[ESP_CMD] = 0;
+ break;
}
+ /* Raise bus service interrupt to indicate change to STATUS phase */
+ esp_set_phase(s, STAT_ST);
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+
+ /* Ensure DRQ is set correctly for TC underflow or normal completion */
+ esp_dma_ti_check(s);
+
if (s->current_req) {
scsi_req_unref(s->current_req);
s->current_req = NULL;
@@ -859,48 +964,66 @@ void esp_command_complete(SCSIRequest *req, size_t resid)
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
ESPState *s = req->hba_private;
- int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
uint32_t dmalen = esp_get_tc(s);
- assert(!s->do_cmd);
trace_esp_transfer_data(dmalen, s->ti_size);
s->async_len = len;
s->async_buf = scsi_req_get_buf(req);
- if (!to_device && !s->data_in_ready) {
- /*
- * Initial incoming data xfer is complete so raise command
- * completion interrupt
- */
- s->data_in_ready = true;
- s->rregs[ESP_RSTAT] |= STAT_TC;
- s->rregs[ESP_RINTR] |= INTR_BS;
- esp_raise_irq(s);
- }
+ if (!s->data_ready) {
+ s->data_ready = true;
- if (s->ti_cmd == 0) {
- /*
- * Always perform the initial transfer upon reception of the next TI
- * command to ensure the DMA/non-DMA status of the command is correct.
- * It is not possible to use s->dma directly in the section below as
- * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
- * async data transfer is delayed then s->dma is set incorrectly.
- */
- return;
- }
+ switch (s->rregs[ESP_CMD]) {
+ case CMD_SEL | CMD_DMA:
+ case CMD_SEL:
+ case CMD_SELATN | CMD_DMA:
+ case CMD_SELATN:
+ /*
+ * Initial incoming data xfer is complete for sequencer command
+ * so raise deferred bus service and function complete interrupt
+ */
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ break;
+
+ case CMD_SELATNS | CMD_DMA:
+ case CMD_SELATNS:
+ /*
+ * Initial incoming data xfer is complete so raise command
+ * completion interrupt
+ */
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ s->rregs[ESP_RSEQ] = SEQ_MO;
+ break;
- if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
- if (dmalen) {
- esp_do_dma(s);
- } else if (s->ti_size <= 0) {
+ case CMD_TI | CMD_DMA:
+ case CMD_TI:
/*
- * If this was the last part of a DMA transfer then the
- * completion interrupt is deferred to here.
+ * Bus service interrupt raised because of initial change to
+ * DATA phase
*/
- esp_dma_done(s);
- esp_lower_drq(s);
+ s->rregs[ESP_CMD] = 0;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ break;
}
- } else if (s->ti_cmd == CMD_TI) {
+
+ esp_raise_irq(s);
+ }
+
+ /*
+ * Always perform the initial transfer upon reception of the next TI
+ * command to ensure the DMA/non-DMA status of the command is correct.
+ * It is not possible to use s->dma directly in the section below as
+ * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
+ * async data transfer is delayed then s->dma is set incorrectly.
+ */
+
+ if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
+ /* When the SCSI layer returns more data, raise deferred INTR_BS */
+ esp_dma_ti_check(s);
+
+ esp_do_dma(s);
+ } else if (s->rregs[ESP_CMD] == CMD_TI) {
esp_do_nodma(s);
}
}
@@ -914,15 +1037,17 @@ static void handle_ti(ESPState *s)
return;
}
- s->ti_cmd = s->rregs[ESP_CMD];
if (s->dma) {
dmalen = esp_get_tc(s);
trace_esp_handle_ti(dmalen);
- s->rregs[ESP_RSTAT] &= ~STAT_TC;
esp_do_dma(s);
} else {
trace_esp_handle_ti(s->ti_size);
esp_do_nodma(s);
+
+ if (esp_get_phase(s) == STAT_DO) {
+ esp_nodma_ti_dataout(s);
+ }
}
}
@@ -936,7 +1061,6 @@ void esp_hard_reset(ESPState *s)
fifo8_reset(&s->fifo);
fifo8_reset(&s->cmdfifo);
s->dma = 0;
- s->do_cmd = 0;
s->dma_cb = NULL;
s->rregs[ESP_CFG1] = 7;
@@ -945,7 +1069,7 @@ void esp_hard_reset(ESPState *s)
static void esp_soft_reset(ESPState *s)
{
qemu_irq_lower(s->irq);
- qemu_irq_lower(s->irq_data);
+ qemu_irq_lower(s->drq_irq);
esp_hard_reset(s);
}
@@ -961,31 +1085,100 @@ static void parent_esp_reset(ESPState *s, int irq, int level)
}
}
+static void esp_run_cmd(ESPState *s)
+{
+ uint8_t cmd = s->rregs[ESP_CMD];
+
+ if (cmd & CMD_DMA) {
+ s->dma = 1;
+ /* Reload DMA counter. */
+ if (esp_get_stc(s) == 0) {
+ esp_set_tc(s, 0x10000);
+ } else {
+ esp_set_tc(s, esp_get_stc(s));
+ }
+ } else {
+ s->dma = 0;
+ }
+ switch (cmd & CMD_CMD) {
+ case CMD_NOP:
+ trace_esp_mem_writeb_cmd_nop(cmd);
+ break;
+ case CMD_FLUSH:
+ trace_esp_mem_writeb_cmd_flush(cmd);
+ fifo8_reset(&s->fifo);
+ break;
+ case CMD_RESET:
+ trace_esp_mem_writeb_cmd_reset(cmd);
+ esp_soft_reset(s);
+ break;
+ case CMD_BUSRESET:
+ trace_esp_mem_writeb_cmd_bus_reset(cmd);
+ esp_bus_reset(s);
+ if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
+ s->rregs[ESP_RINTR] |= INTR_RST;
+ esp_raise_irq(s);
+ }
+ break;
+ case CMD_TI:
+ trace_esp_mem_writeb_cmd_ti(cmd);
+ handle_ti(s);
+ break;
+ case CMD_ICCS:
+ trace_esp_mem_writeb_cmd_iccs(cmd);
+ write_response(s);
+ break;
+ case CMD_MSGACC:
+ trace_esp_mem_writeb_cmd_msgacc(cmd);
+ s->rregs[ESP_RINTR] |= INTR_DC;
+ s->rregs[ESP_RSEQ] = 0;
+ s->rregs[ESP_RFLAGS] = 0;
+ esp_raise_irq(s);
+ break;
+ case CMD_PAD:
+ trace_esp_mem_writeb_cmd_pad(cmd);
+ handle_pad(s);
+ break;
+ case CMD_SATN:
+ trace_esp_mem_writeb_cmd_satn(cmd);
+ break;
+ case CMD_RSTATN:
+ trace_esp_mem_writeb_cmd_rstatn(cmd);
+ break;
+ case CMD_SEL:
+ trace_esp_mem_writeb_cmd_sel(cmd);
+ handle_s_without_atn(s);
+ break;
+ case CMD_SELATN:
+ trace_esp_mem_writeb_cmd_selatn(cmd);
+ handle_satn(s);
+ break;
+ case CMD_SELATNS:
+ trace_esp_mem_writeb_cmd_selatns(cmd);
+ handle_satn_stop(s);
+ break;
+ case CMD_ENSEL:
+ trace_esp_mem_writeb_cmd_ensel(cmd);
+ s->rregs[ESP_RINTR] = 0;
+ break;
+ case CMD_DISSEL:
+ trace_esp_mem_writeb_cmd_dissel(cmd);
+ s->rregs[ESP_RINTR] = 0;
+ esp_raise_irq(s);
+ break;
+ default:
+ trace_esp_error_unhandled_command(cmd);
+ break;
+ }
+}
+
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
uint32_t val;
switch (saddr) {
case ESP_FIFO:
- if (s->dma_memory_read && s->dma_memory_write &&
- (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
- /* Data out. */
- qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
- s->rregs[ESP_FIFO] = 0;
- } else {
- if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
- if (s->ti_size) {
- esp_do_nodma(s);
- } else {
- /*
- * The last byte of a non-DMA transfer has been read out
- * of the FIFO so switch to status phase
- */
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- }
- }
- s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
- }
+ s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
val = s->rregs[ESP_FIFO];
break;
case ESP_RINTR:
@@ -995,7 +1188,8 @@ uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
*/
val = s->rregs[ESP_RINTR];
s->rregs[ESP_RINTR] = 0;
- s->rregs[ESP_RSTAT] &= ~STAT_TC;
+ esp_lower_irq(s);
+ s->rregs[ESP_RSTAT] &= STAT_TC | 7;
/*
* According to the datasheet ESP_RSEQ should be cleared, but as the
* emulation currently defers information transfers to the next TI
@@ -1005,7 +1199,6 @@ uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
*
* s->rregs[ESP_RSEQ] = SEQ_0;
*/
- esp_lower_irq(s);
break;
case ESP_TCHI:
/* Return the unique id if the value has never been written */
@@ -1040,108 +1233,14 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
s->rregs[ESP_RSTAT] &= ~STAT_TC;
break;
case ESP_FIFO:
- if (s->do_cmd) {
- esp_fifo_push(&s->cmdfifo, val);
-
- /*
- * If any unexpected message out/command phase data is
- * transferred using non-DMA, raise the interrupt
- */
- if (s->rregs[ESP_CMD] == CMD_TI) {
- s->rregs[ESP_RINTR] |= INTR_BS;
- esp_raise_irq(s);
- }
- } else {
+ if (!fifo8_is_full(&s->fifo)) {
esp_fifo_push(&s->fifo, val);
}
+ esp_do_nodma(s);
break;
case ESP_CMD:
s->rregs[saddr] = val;
- if (val & CMD_DMA) {
- s->dma = 1;
- /* Reload DMA counter. */
- if (esp_get_stc(s) == 0) {
- esp_set_tc(s, 0x10000);
- } else {
- esp_set_tc(s, esp_get_stc(s));
- }
- } else {
- s->dma = 0;
- }
- switch (val & CMD_CMD) {
- case CMD_NOP:
- trace_esp_mem_writeb_cmd_nop(val);
- break;
- case CMD_FLUSH:
- trace_esp_mem_writeb_cmd_flush(val);
- fifo8_reset(&s->fifo);
- break;
- case CMD_RESET:
- trace_esp_mem_writeb_cmd_reset(val);
- esp_soft_reset(s);
- break;
- case CMD_BUSRESET:
- trace_esp_mem_writeb_cmd_bus_reset(val);
- esp_bus_reset(s);
- if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
- s->rregs[ESP_RINTR] |= INTR_RST;
- esp_raise_irq(s);
- }
- break;
- case CMD_TI:
- trace_esp_mem_writeb_cmd_ti(val);
- handle_ti(s);
- break;
- case CMD_ICCS:
- trace_esp_mem_writeb_cmd_iccs(val);
- write_response(s);
- s->rregs[ESP_RINTR] |= INTR_FC;
- s->rregs[ESP_RSTAT] |= STAT_MI;
- break;
- case CMD_MSGACC:
- trace_esp_mem_writeb_cmd_msgacc(val);
- s->rregs[ESP_RINTR] |= INTR_DC;
- s->rregs[ESP_RSEQ] = 0;
- s->rregs[ESP_RFLAGS] = 0;
- esp_raise_irq(s);
- break;
- case CMD_PAD:
- trace_esp_mem_writeb_cmd_pad(val);
- s->rregs[ESP_RSTAT] = STAT_TC;
- s->rregs[ESP_RINTR] |= INTR_FC;
- s->rregs[ESP_RSEQ] = 0;
- break;
- case CMD_SATN:
- trace_esp_mem_writeb_cmd_satn(val);
- break;
- case CMD_RSTATN:
- trace_esp_mem_writeb_cmd_rstatn(val);
- break;
- case CMD_SEL:
- trace_esp_mem_writeb_cmd_sel(val);
- handle_s_without_atn(s);
- break;
- case CMD_SELATN:
- trace_esp_mem_writeb_cmd_selatn(val);
- handle_satn(s);
- break;
- case CMD_SELATNS:
- trace_esp_mem_writeb_cmd_selatns(val);
- handle_satn_stop(s);
- break;
- case CMD_ENSEL:
- trace_esp_mem_writeb_cmd_ensel(val);
- s->rregs[ESP_RINTR] = 0;
- break;
- case CMD_DISSEL:
- trace_esp_mem_writeb_cmd_dissel(val);
- s->rregs[ESP_RINTR] = 0;
- esp_raise_irq(s);
- break;
- default:
- trace_esp_error_unhandled_command(val);
- break;
- }
+ esp_run_cmd(s);
break;
case ESP_WBUSID ... ESP_WSYNO:
break;
@@ -1190,6 +1289,14 @@ static bool esp_is_version_6(void *opaque, int version_id)
return version_id >= 6;
}
+static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
+{
+ ESPState *s = ESP(opaque);
+
+ version_id = MIN(version_id, s->mig_version_id);
+ return version_id >= 5 && version_id <= 6;
+}
+
int esp_pre_save(void *opaque)
{
ESPState *s = ESP(object_resolve_path_component(
@@ -1225,36 +1332,9 @@ static int esp_post_load(void *opaque, int version_id)
return 0;
}
-/*
- * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
- * guest CPU to perform the transfers between the SCSI bus and memory
- * itself. This is indicated by the dma_memory_read and dma_memory_write
- * functions being NULL (in contrast to the ESP PCI device) whilst
- * dma_enabled is still set.
- */
-
-static bool esp_pdma_needed(void *opaque)
-{
- ESPState *s = ESP(opaque);
-
- return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
- s->dma_enabled;
-}
-
-static const VMStateDescription vmstate_esp_pdma = {
- .name = "esp/pdma",
- .version_id = 0,
- .minimum_version_id = 0,
- .needed = esp_pdma_needed,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT8(pdma_cb, ESPState),
- VMSTATE_END_OF_LIST()
- }
-};
-
const VMStateDescription vmstate_esp = {
.name = "esp",
- .version_id = 6,
+ .version_id = 7,
.minimum_version_id = 3,
.post_load = esp_post_load,
.fields = (const VMStateField[]) {
@@ -1278,18 +1358,16 @@ const VMStateDescription vmstate_esp = {
VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
VMSTATE_UINT32(do_cmd, ESPState),
VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
- VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
+ VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
- VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
+ VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
+ esp_is_between_version_5_and_6),
VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
+ VMSTATE_BOOL(drq_state, ESPState),
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription * const []) {
- &vmstate_esp_pdma,
- NULL
- }
};
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
@@ -1338,7 +1416,7 @@ static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
esp_pdma_write(s, val);
break;
}
- esp_pdma_cb(s);
+ esp_do_dma(s);
}
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
@@ -1359,9 +1437,7 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
val = (val << 8) | esp_pdma_read(s);
break;
}
- if (fifo8_num_used(&s->fifo) < 2) {
- esp_pdma_cb(s);
- }
+ esp_do_dma(s);
return val;
}
@@ -1421,7 +1497,7 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
}
sysbus_init_irq(sbd, &s->irq);
- sysbus_init_irq(sbd, &s->irq_data);
+ sysbus_init_irq(sbd, &s->drq_irq);
assert(sysbus->it_shift != -1);
s->chip_id = TCHI_FAS100A;
@@ -1474,14 +1550,6 @@ static void sysbus_esp_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
-static const TypeInfo sysbus_esp_info = {
- .name = TYPE_SYSBUS_ESP,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_init = sysbus_esp_init,
- .instance_size = sizeof(SysBusESPState),
- .class_init = sysbus_esp_class_init,
-};
-
static void esp_finalize(Object *obj)
{
ESPState *s = ESP(obj);
@@ -1507,19 +1575,22 @@ static void esp_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
-static const TypeInfo esp_info = {
- .name = TYPE_ESP,
- .parent = TYPE_DEVICE,
- .instance_init = esp_init,
- .instance_finalize = esp_finalize,
- .instance_size = sizeof(ESPState),
- .class_init = esp_class_init,
+static const TypeInfo esp_info_types[] = {
+ {
+ .name = TYPE_SYSBUS_ESP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = sysbus_esp_init,
+ .instance_size = sizeof(SysBusESPState),
+ .class_init = sysbus_esp_class_init,
+ },
+ {
+ .name = TYPE_ESP,
+ .parent = TYPE_DEVICE,
+ .instance_init = esp_init,
+ .instance_finalize = esp_finalize,
+ .instance_size = sizeof(ESPState),
+ .class_init = esp_class_init,
+ },
};
-static void esp_register_types(void)
-{
- type_register_static(&sysbus_esp_info);
- type_register_static(&esp_info);
-}
-
-type_init(esp_register_types)
+DEFINE_TYPES(esp_info_types)
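
The new esp_fifo_pop_buf() above allows for fifo8_pop_buf() returning fewer bytes than requested when the stored data wraps around the end of the ring's backing array, so it pops a second time to pick up the remainder ("Add FIFO wraparound if needed"). The standalone sketch below shows the same two-chunk copy on a simple ring buffer; the Ring type and ring_* names are hypothetical and this is not QEMU's Fifo8 API.

/* Sketch: popping from a ring buffer whose contents may wrap around the end
 * of the backing array requires up to two copies. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SZ 8

typedef struct {
    uint8_t data[RING_SZ];
    uint32_t head;              /* index of the oldest byte */
    uint32_t used;              /* number of valid bytes */
} Ring;

static void ring_push(Ring *r, uint8_t byte)
{
    r->data[(r->head + r->used) % RING_SZ] = byte;
    r->used++;
}

static uint32_t ring_pop_buf(Ring *r, uint8_t *dest, uint32_t maxlen)
{
    uint32_t n = maxlen < r->used ? maxlen : r->used;
    uint32_t first = RING_SZ - r->head;     /* contiguous bytes up to the end */

    if (first > n) {
        first = n;
    }
    memcpy(dest, &r->data[r->head], first);
    memcpy(dest + first, &r->data[0], n - first);   /* wrapped remainder */

    r->head = (r->head + n) % RING_SZ;
    r->used -= n;
    return n;
}

int main(void)
{
    Ring r = { .head = 6, .used = 0 };      /* start near the end to force a wrap */
    uint8_t out[RING_SZ];

    for (uint8_t i = 0; i < 5; i++) {
        ring_push(&r, i);                   /* the last bytes wrap to the front */
    }
    uint32_t n = ring_pop_buf(&r, out, sizeof(out));
    printf("popped %u bytes, last = %u\n", n, out[n - 1]);
    return 0;
}
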
diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
index bdd4e2c..d72f741 100644
--- a/hw/scsi/trace-events
+++ b/hw/scsi/trace-events
@@ -197,6 +197,7 @@ esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (0x%2.2x)"
esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)"
esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)"
esp_mem_writeb_cmd_ti(uint32_t val) "Transfer Information (0x%2.2x)"
+esp_set_phase(const char *phase) "setting bus phase to %s"
# esp-pci.c
esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction"
diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h
index 13b1749..533d856 100644
--- a/include/hw/scsi/esp.h
+++ b/include/hw/scsi/esp.h
@@ -25,7 +25,8 @@ struct ESPState {
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
qemu_irq irq;
- qemu_irq irq_data;
+ qemu_irq drq_irq;
+ bool drq_state;
uint8_t chip_id;
bool tchi_written;
int32_t ti_size;
@@ -40,8 +41,7 @@ struct ESPState {
uint8_t lun;
uint32_t do_cmd;
- bool data_in_ready;
- uint8_t ti_cmd;
+ bool data_ready;
int dma_enabled;
uint32_t async_len;
@@ -51,7 +51,6 @@ struct ESPState {
ESPDMAMemoryReadWriteFunc dma_memory_write;
void *dma_opaque;
void (*dma_cb)(ESPState *s);
- uint8_t pdma_cb;
uint8_t mig_version_id;
@@ -63,6 +62,8 @@ struct ESPState {
uint8_t mig_ti_buf[ESP_FIFO_SZ];
uint8_t mig_cmdbuf[ESP_CMDFIFO_SZ];
uint32_t mig_cmdlen;
+
+ uint8_t mig_ti_cmd;
};
#define TYPE_SYSBUS_ESP "sysbus-esp"
@@ -150,15 +151,6 @@ struct SysBusESPState {
#define TCHI_FAS100A 0x4
#define TCHI_AM53C974 0x12
-/* PDMA callbacks */
-enum pdma_cb {
- SATN_PDMA_CB = 0,
- S_WITHOUT_SATN_PDMA_CB = 1,
- SATN_STOP_PDMA_CB = 2,
- WRITE_RESPONSE_PDMA_CB = 3,
- DO_DMA_PDMA_CB = 4
-};
-
void esp_dma_enable(ESPState *s, int irq, int level);
void esp_request_cancelled(SCSIRequest *req);
void esp_command_complete(SCSIRequest *req, size_t resid);
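
For reference, the transfer counter handled by esp_get_tc()/esp_set_tc() in the esp.c diff above is a 24-bit value split across the TCLO/TCMID/TCHI byte registers, and the series now latches a terminal-count status bit as soon as a write takes the counter from non-zero to zero. Below is a minimal standalone sketch of that register split, using hypothetical register and status names rather than QEMU code.

/* Sketch: 24-bit transfer counter in three 8-bit registers, with a status
 * bit latched when the counter transitions to zero. */
#include <stdint.h>
#include <stdio.h>

enum { TCLO, TCMID, TCHI, NREGS };
#define DEMO_STAT_TC 0x10

typedef struct {
    uint8_t regs[NREGS];
    uint8_t status;
} DemoTC;

static uint32_t demo_get_tc(const DemoTC *d)
{
    return d->regs[TCLO] | (d->regs[TCMID] << 8) | (d->regs[TCHI] << 16);
}

static void demo_set_tc(DemoTC *d, uint32_t len)
{
    uint32_t old = demo_get_tc(d);

    d->regs[TCLO]  = len & 0xff;
    d->regs[TCMID] = (len >> 8) & 0xff;
    d->regs[TCHI]  = (len >> 16) & 0xff;

    if (old && len == 0) {
        d->status |= DEMO_STAT_TC;   /* counter just reached terminal count */
    }
}

int main(void)
{
    DemoTC d = { { 0 }, 0 };

    demo_set_tc(&d, 0x10000);                       /* reload the counter */
    demo_set_tc(&d, demo_get_tc(&d) - 0x10000);     /* consume everything */
    printf("tc=%u status=0x%02x\n", demo_get_tc(&d), d.status);
    return 0;
}
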