author     aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-10-31 17:25:56 +0000
committer  aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-10-31 17:25:56 +0000
commit     492c30af2567a59413c064f88eb81e1691865195 (patch)
tree       6f95d3dacabf2f99be4d0b60e41ad855385ee055 /hw/dma.c
parent     1b435b10324fe9937f254bb00718f78d5e50837a (diff)
Make DMA bottom-half driven (v2)
The current DMA routines are driven by a call in main_loop_wait() after every
select.
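For context, the pre-patch arrangement looks roughly like this, paraphrased rather than quoted, since the caller lives in vl.c and is not part of this diff:

```c
void DMA_run (void);   /* the old, globally visible poll entry point */

/* Paraphrased pre-patch behaviour (vl.c, outside this diff): DMA_run()
 * is polled on every pass through the main loop, whether or not any
 * channel actually has work to do. */
void main_loop_wait(int timeout)
{
    /* ... select() on file descriptors, run timers ... */
    DMA_run();
}
```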
This patch converts the DMA code to be driven by a constantly rescheduled
bottom half. The advantage of using a scheduled bottom half is that we can
stop scheduling it when no DMA channels are runnable. This means we can
potentially detect this case and sleep longer in the main loop.
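In outline, the new scheme is a bottom half that re-arms itself only while work remains. The sketch below condenses the pattern from the diff; QEMUBH, qemu_bh_new() and qemu_bh_schedule_idle() are QEMU's existing bottom-half API (the patch adds no new includes, so they are already visible in hw/dma.c), and dma_work_pending() is a hypothetical stand-in for the real per-channel mask/status check:

```c
static QEMUBH *dma_bh;

/* Hypothetical placeholder for the real check over both controllers. */
static int dma_work_pending(void)
{
    return 0;
}

static void DMA_run(void)
{
    int rearm = 0;

    /* ... service every unmasked channel with a pending DREQ ... */
    if (dma_work_pending())
        rearm = 1;

    /* Re-arm only while there is work left; otherwise the bottom half
     * simply stops firing and the main loop can sleep longer. */
    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

/* One-time setup; in the patch this happens at the end of DMA_init(). */
static void dma_bh_setup(void)
{
    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}
```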
The only two architectures implementing DMA_run() are cris and i386. For cris,
I converted it to a simple repeating bottom half. I've only compile-tested
this, as cris does not seem to work on a 64-bit host. It should be functionally
identical to the previous implementation, so I expect it to work.
For x86, I've made sure to only fire the DMA bottom half if at least one DMA
channel is runnable. The effect of this is that, unless you're using sb16
or a floppy disk, the DMA bottom half never fires.
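Runnable here means exactly what the run loop tests: the channel is unmasked and its DREQ status bit is set. As a self-contained illustration of that check (the helper name is illustrative, not something the patch adds):

```c
/* Illustrative helper, not part of the patch: channel ichan of controller
 * d can transfer when its mask bit is clear and its DREQ flag is set
 * (status bits 4-7 mirror DREQ for channels 0-3).  struct dma_cont is the
 * controller state already defined in hw/dma.c. */
static int dma_channel_runnable (struct dma_cont *d, int ichan)
{
    int mask = 1 << ichan;

    return (0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)));
}
```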
You should probably test this, malc. My own benchmarks actually show a slight
improvement, but it's possible the change in timing could affect your demos.
Since v1, I've changed the code to use a BH instead of a timer. cris, at least,
seems to depend on faster-than-10ms polling.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5573 c046a42c-6fe2-441c-8c8c-71466251a162
Diffstat (limited to 'hw/dma.c')
-rw-r--r--  hw/dma.c  30
1 file changed, 28 insertions(+), 2 deletions(-)
@@ -78,6 +78,8 @@ enum {
 
 };
 
+static void DMA_run (void);
+
 static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
 
 static void write_page (void *opaque, uint32_t nport, uint32_t data)
@@ -214,6 +216,7 @@ static void write_cont (void *opaque, uint32_t nport, uint32_t data)
             d->status &= ~(1 << (ichan + 4));
         }
         d->status &= ~(1 << ichan);
+        DMA_run();
         break;
 
     case 0x0a: /* single mask */
@@ -221,6 +224,7 @@ static void write_cont (void *opaque, uint32_t nport, uint32_t data)
             d->mask |= 1 << (data & 3);
         else
             d->mask &= ~(1 << (data & 3));
+        DMA_run();
         break;
 
     case 0x0b: /* mode */
@@ -255,10 +259,12 @@ static void write_cont (void *opaque, uint32_t nport, uint32_t data)
 
     case 0x0e: /* clear mask for all channels */
         d->mask = 0;
+        DMA_run();
         break;
 
     case 0x0f: /* write mask for all channels */
         d->mask = data;
+        DMA_run();
         break;
 
     default:
@@ -310,6 +316,7 @@ void DMA_hold_DREQ (int nchan)
     ichan = nchan & 3;
     linfo ("held cont=%d chan=%d\n", ncont, ichan);
     dma_controllers[ncont].status |= 1 << (ichan + 4);
+    DMA_run();
 }
 
 void DMA_release_DREQ (int nchan)
@@ -320,6 +327,7 @@ void DMA_release_DREQ (int nchan)
     ichan = nchan & 3;
     linfo ("released cont=%d chan=%d\n", ncont, ichan);
     dma_controllers[ncont].status &= ~(1 << (ichan + 4));
+    DMA_run();
 }
 
 static void channel_run (int ncont, int ichan)
@@ -347,10 +355,13 @@ static void channel_run (int ncont, int ichan)
     ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
 }
 
-void DMA_run (void)
+static QEMUBH *dma_bh;
+
+static void DMA_run (void)
 {
     struct dma_cont *d;
     int icont, ichan;
+    int rearm = 0;
 
     d = dma_controllers;
 
@@ -360,10 +371,20 @@ void DMA_run (void)
 
             mask = 1 << ichan;
 
-            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
+            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                 channel_run (icont, ichan);
+                rearm = 1;
+            }
         }
     }
+
+    if (rearm)
+        qemu_bh_schedule_idle(dma_bh);
+}
+
+static void DMA_run_bh(void *unused)
+{
+    DMA_run();
 }
 
 void DMA_register_channel (int nchan,
@@ -534,6 +555,9 @@ static int dma_load (QEMUFile *f, void *opaque, int version_id)
         qemu_get_8s (f, &r->dack);
         qemu_get_8s (f, &r->eop);
     }
+
+    DMA_run();
+
     return 0;
 }
 
@@ -545,4 +569,6 @@ void DMA_init (int high_page_enable)
               high_page_enable ? 0x488 : -1);
     register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
     register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
+
+    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
 }