author    Alexander Graf <agraf@suse.de>  2013-06-28 13:30:01 +0200
committer Alexander Graf <agraf@suse.de>  2013-07-11 18:51:25 +0200
commit    80fc95d8bdaf3392106b131a97ca701fd374489a (patch)
tree      cc2c5277f074224385ee19e9066fca8ab56cd339 /hw/ide
parent    cae323572eddc1a45e2f6ef98c006d98fed23b1e (diff)
PPC: dbdma: Support unaligned DMA access
The DBDMA engine really just reads bytes from a producing device (IDE in our case) and shoves these bytes into memory. It doesn't care whether any alignment takes place or not.

Our code today however assumes that block accesses always happen on sector (512 byte) boundaries. This is a fair assumption for most cases.

However, Mac OS X really likes to do unaligned, incomplete accesses that it finishes with the next DMA request.

So we need to read/write the unaligned bits independent of the actual asynchronous request, because that one can only handle 512-byte-aligned data. We also need to cache these unaligned sectors until the next DMA request, at which point the data might be successfully flushed from the pipe.

Signed-off-by: Alexander Graf <agraf@suse.de>
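To make the scheme concrete, here is a minimal, self-contained C sketch of the remainder-caching idea on the read path, independent of QEMU's internals. Everything in it is hypothetical illustration: the names dma_io, flush_remainder, start_transfer, bulk_dma and read_sector do not exist in the patch, which implements the same steps inside pmac_ide_transfer_cb() / pmac_ide_atapi_transfer_cb() using cpu_physical_memory_write(), bdrv_read() and QEMU's scatter-gather helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512   /* the 0x200 granularity used throughout the patch */

/* Hypothetical stand-in for QEMU's DBDMA_io: one in-flight request plus
 * the cached partial sector that survives until the next request. */
typedef struct {
    uint8_t *addr;                    /* destination of this request   */
    size_t   len;                     /* bytes the guest asked for     */
    uint8_t  remainder[SECTOR_SIZE];  /* cached partial sector         */
    size_t   remainder_len;           /* valid bytes still to deliver  */
} dma_io;

/* If the previous request ended mid-sector, hand the guest the cached
 * tail of that sector first; mirrors the io->remainder + 0x200 -
 * remainder_len arithmetic in the patch. */
static void flush_remainder(dma_io *io)
{
    size_t n = io->remainder_len < io->len ? io->remainder_len : io->len;

    if (n) {
        memcpy(io->addr, io->remainder + SECTOR_SIZE - n, n);
        io->addr += n;
        io->len -= n;
        io->remainder_len -= n;
    }
}

/* Split a request into an aligned bulk part (handled asynchronously in
 * the real code) and an unaligned head of the next sector, which is
 * read synchronously and cached for the *next* request. */
static void start_transfer(dma_io *io,
                           void (*bulk_dma)(uint8_t *dst, size_t len),
                           void (*read_sector)(uint8_t *buf))
{
    size_t unaligned;

    flush_remainder(io);
    unaligned = io->len & (SECTOR_SIZE - 1);

    if (unaligned) {
        /* Read the whole sector once, copy only the bytes the guest
         * wants now, keep the rest cached in io->remainder. */
        read_sector(io->remainder);
        memcpy(io->addr + io->len - unaligned, io->remainder, unaligned);
        io->len -= unaligned;
        io->remainder_len = SECTOR_SIZE - unaligned;
    }

    if (io->len) {
        bulk_dma(io->addr, io->len);  /* 512-byte-aligned fast path */
    }
}

The key invariant matches the patch: the valid bytes of a partially consumed sector always sit at the end of the 512-byte remainder buffer, starting at offset 0x200 - remainder_len.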
Diffstat (limited to 'hw/ide')
 hw/ide/macio.c | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 124 insertions(+), 7 deletions(-)
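For completeness, a short hypothetical driver (continuing the same sketch file as above) shows the two-request pattern the commit message describes: the first request stops 100 bytes into a sector, the second consumes the 412 cached bytes. The fake disk and callbacks below are illustration only, not QEMU APIs.

/* Fake backing store: sector 0 feeds the bulk path, sector 1 is the
 * sector that ends up split across two guest requests. */
static uint8_t disk[2 * SECTOR_SIZE];

static void bulk_dma(uint8_t *dst, size_t len)
{
    memcpy(dst, disk, len);           /* stand-in for the async DMA path */
}

static void read_sector(uint8_t *buf)
{
    memcpy(buf, disk + SECTOR_SIZE, SECTOR_SIZE);   /* sector 1 */
}

int main(void)
{
    static uint8_t guest[2 * SECTOR_SIZE];
    dma_io io = { .addr = guest, .len = SECTOR_SIZE + 100 };

    start_transfer(&io, bulk_dma, read_sector);
    printf("cached remainder: %zu bytes\n", io.remainder_len);  /* 412 */

    io.addr = guest + SECTOR_SIZE + 100;    /* guest finishes the sector */
    io.len = SECTOR_SIZE - 100;
    start_transfer(&io, bulk_dma, read_sector);
    printf("cached remainder: %zu bytes\n", io.remainder_len);  /* 0 */
    return 0;
}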
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 2b1e51d..18bc1d1 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -56,11 +56,13 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
+ int unaligned;
if (ret < 0) {
m->aiocb = NULL;
qemu_sglist_destroy(&s->sg);
ide_atapi_io_error(s, ret);
+ io->remainder_len = 0;
goto done;
}
@@ -85,7 +87,31 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
s->io_buffer_index &= 0x7ff;
}
- if (s->packet_transfer_size <= 0) {
+ s->io_buffer_size = io->len;
+
+ MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
+ io->len, s->packet_transfer_size);
+ if (io->remainder_len && io->len) {
+ /* guest wants the rest of its previous transfer */
+ int remainder_len = MIN(io->remainder_len, io->len);
+
+ MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);
+
+ cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
+ remainder_len, remainder_len);
+
+ io->addr += remainder_len;
+ io->len -= remainder_len;
+ s->io_buffer_size = remainder_len;
+ io->remainder_len -= remainder_len;
+ /* treat remainder as individual transfer, start again */
+ qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+ &address_space_memory);
+ pmac_ide_atapi_transfer_cb(opaque, 0);
+ return;
+ }
+
+ if (!s->packet_transfer_size) {
MACIO_DPRINTF("end of transfer\n");
ide_atapi_cmd_ok(s);
m->dma_active = false;
@@ -98,14 +124,40 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
/* launch next transfer */
- MACIO_DPRINTF("io->len = %#x\n", io->len);
+ /* handle unaligned accesses first, get them over with and only do the
+ remaining bulk transfer using our async DMA helpers */
+ unaligned = io->len & 0x1ff;
+ if (unaligned) {
+ int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
+ int nsector = io->len >> 9;
- s->io_buffer_size = io->len;
+ MACIO_DPRINTF("precopying unaligned %d bytes to %#lx\n",
+ unaligned, io->addr + io->len - unaligned);
+
+ bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
+ cpu_physical_memory_write(io->addr + io->len - unaligned,
+ io->remainder, unaligned);
+
+ io->len -= unaligned;
+ }
+
+ MACIO_DPRINTF("io->len = %#x\n", io->len);
qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
&address_space_memory);
qemu_sglist_add(&s->sg, io->addr, io->len);
- io->addr += io->len;
+ io->addr += s->io_buffer_size;
+ io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
+ (0x200 - unaligned) & 0x1ff);
+ MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);
+
+ /* We would read no data from the block layer, thus not get a callback.
+ Just fake completion manually. */
+ if (!io->len) {
+ pmac_ide_atapi_transfer_cb(opaque, 0);
+ return;
+ }
+
io->len = 0;
MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
@@ -128,14 +180,16 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
- int n;
+ int n = 0;
int64_t sector_num;
+ int unaligned;
if (ret < 0) {
MACIO_DPRINTF("DMA error\n");
m->aiocb = NULL;
qemu_sglist_destroy(&s->sg);
ide_dma_error(s);
+ io->remainder_len = 0;
goto done;
}
@@ -158,7 +212,33 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
s->nsector -= n;
}
- if (s->nsector == 0) {
+ MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d sector_num: %ld\n",
+ io->remainder_len, io->len, s->nsector, sector_num);
+ if (io->remainder_len && io->len) {
+ /* guest wants the rest of its previous transfer */
+ int remainder_len = MIN(io->remainder_len, io->len);
+ uint8_t *p = &io->remainder[0x200 - remainder_len];
+
+ MACIO_DPRINTF("copying remainder %d bytes at %#lx\n",
+ remainder_len, io->addr);
+
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
+ cpu_physical_memory_write(io->addr, p, remainder_len);
+ break;
+ case IDE_DMA_WRITE:
+ cpu_physical_memory_read(io->addr, p, remainder_len);
+ bdrv_write(s->bs, sector_num - 1, io->remainder, 1);
+ break;
+ case IDE_DMA_TRIM:
+ break;
+ }
+ io->addr += remainder_len;
+ io->len -= remainder_len;
+ io->remainder_len -= remainder_len;
+ }
+
+ if (s->nsector == 0 && !io->remainder_len) {
MACIO_DPRINTF("end of transfer\n");
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
@@ -175,12 +255,49 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
s->io_buffer_index = 0;
s->io_buffer_size = io->len;
+ /* handle unaligned accesses first, get them over with and only do the
+ remaining bulk transfer using our async DMA helpers */
+ unaligned = io->len & 0x1ff;
+ if (unaligned) {
+ int nsector = io->len >> 9;
+
+ MACIO_DPRINTF("precopying unaligned %d bytes to %#lx\n",
+ unaligned, io->addr + io->len - unaligned);
+
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
+ bdrv_read(s->bs, sector_num + nsector, io->remainder, 1);
+ cpu_physical_memory_write(io->addr + io->len - unaligned,
+ io->remainder, unaligned);
+ break;
+ case IDE_DMA_WRITE:
+ /* cache the contents in our io struct */
+ cpu_physical_memory_read(io->addr + io->len - unaligned,
+ io->remainder, unaligned);
+ break;
+ case IDE_DMA_TRIM:
+ break;
+ }
+
+ io->len -= unaligned;
+ }
+
MACIO_DPRINTF("io->len = %#x\n", io->len);
qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
&address_space_memory);
qemu_sglist_add(&s->sg, io->addr, io->len);
- io->addr += io->len;
+ io->addr += io->len + unaligned;
+ io->remainder_len = (0x200 - unaligned) & 0x1ff;
+ MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);
+
+ /* We would read no data from the block layer, thus not get a callback.
+ Just fake completion manually. */
+ if (!io->len) {
+ pmac_ide_transfer_cb(opaque, 0);
+ return;
+ }
+
io->len = 0;
MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",