author     Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>    2016-06-10 19:26:37 +0100
committer  David Gibson <david@gibson.dropbear.id.au>          2016-06-14 10:43:24 +1000
commit     bc9ca5958d084222cdb233619dfc5046c81fb76d
tree       eff6cfa9bc61e29b41a1077cb445fb84e214415a /hw/ide
parent     42bff4772ef96d901772240b10eda6d66ef771a1
macio: call dma_memory_unmap() at the end of each DMA transfer
This ensures that the underlying memory is marked dirty once the transfer is
complete and resolves cache coherency problems under MacOS 9.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
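In outline, the patch makes the pmac_dma_* helpers record the DMA mapping in
the DBDMA_io request itself (instead of in locals that go out of scope), so
that the transfer completion callbacks can unmap it once the I/O finishes.
Below is a minimal sketch of that map/unmap pairing; it assumes the DBDMA_io
struct carries the dma_mem, dma_len and dir fields added alongside this
change (outside hw/ide), and the *_sketch function names are illustrative
only, not part of the patch.

/* Sketch only: shows the pairing this patch introduces, assuming
 * DBDMA_io has dma_mem, dma_len and dir fields. */

static void pmac_dma_map_sketch(DBDMA_io *io, DMADirection dir)
{
    /* Map the guest buffer once per transfer and remember the mapping
     * in the request rather than in a local variable. */
    io->dir = dir;
    io->dma_len = io->len;
    io->dma_mem = dma_memory_map(&address_space_memory, io->addr,
                                 &io->dma_len, io->dir);
}

static void pmac_dma_complete_sketch(DBDMA_io *io)
{
    /* Unmapping at completion, with the length that was actually
     * accessed, marks the underlying guest memory dirty; this is what
     * resolves the MacOS 9 cache coherency problem described above. */
    dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
                     io->dir, io->dma_len);
}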
Diffstat (limited to 'hw/ide')
 hw/ide/macio.c | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 78c10a0406..fa57352fc8 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -66,8 +66,7 @@ static void pmac_dma_read(BlockBackend *blk,
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
- dma_addr_t dma_addr, dma_len;
- void *mem;
+ dma_addr_t dma_addr;
int64_t sector_num;
int nsector;
uint64_t align = BDRV_SECTOR_SIZE;
@@ -84,9 +83,10 @@ static void pmac_dma_read(BlockBackend *blk,
sector_num, nsector);
dma_addr = io->addr;
- dma_len = io->len;
- mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
- DMA_DIRECTION_FROM_DEVICE);
+ io->dir = DMA_DIRECTION_FROM_DEVICE;
+ io->dma_len = io->len;
+ io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
+ io->dir);
if (offset & (align - 1)) {
head_bytes = offset & (align - 1);
@@ -100,7 +100,7 @@ static void pmac_dma_read(BlockBackend *blk,
offset = offset & ~(align - 1);
}
- qemu_iovec_add(&io->iov, mem, io->len);
+ qemu_iovec_add(&io->iov, io->dma_mem, io->len);
if ((offset + bytes) & (align - 1)) {
tail_bytes = (offset + bytes) & (align - 1);
@@ -130,8 +130,7 @@ static void pmac_dma_write(BlockBackend *blk,
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
- dma_addr_t dma_addr, dma_len;
- void *mem;
+ dma_addr_t dma_addr;
int64_t sector_num;
int nsector;
uint64_t align = BDRV_SECTOR_SIZE;
@@ -149,9 +148,10 @@ static void pmac_dma_write(BlockBackend *blk,
sector_num, nsector);
dma_addr = io->addr;
- dma_len = io->len;
- mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
- DMA_DIRECTION_TO_DEVICE);
+ io->dir = DMA_DIRECTION_TO_DEVICE;
+ io->dma_len = io->len;
+ io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
+ io->dir);
if (offset & (align - 1)) {
head_bytes = offset & (align - 1);
@@ -163,7 +163,7 @@ static void pmac_dma_write(BlockBackend *blk,
blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
- qemu_iovec_add(&io->iov, mem, io->len);
+ qemu_iovec_add(&io->iov, io->dma_mem, io->len);
bytes += offset & (align - 1);
offset = offset & ~(align - 1);
@@ -181,7 +181,7 @@ static void pmac_dma_write(BlockBackend *blk,
blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
if (!unaligned_head) {
- qemu_iovec_add(&io->iov, mem, io->len);
+ qemu_iovec_add(&io->iov, io->dma_mem, io->len);
}
qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
@@ -193,7 +193,7 @@ static void pmac_dma_write(BlockBackend *blk,
}
if (!unaligned_head && !unaligned_tail) {
- qemu_iovec_add(&io->iov, mem, io->len);
+ qemu_iovec_add(&io->iov, io->dma_mem, io->len);
}
s->io_buffer_size -= io->len;
@@ -214,18 +214,18 @@ static void pmac_dma_trim(BlockBackend *blk,
DBDMA_io *io = opaque;
MACIOIDEState *m = io->opaque;
IDEState *s = idebus_active_if(&m->bus);
- dma_addr_t dma_addr, dma_len;
- void *mem;
+ dma_addr_t dma_addr;
qemu_iovec_destroy(&io->iov);
qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
dma_addr = io->addr;
- dma_len = io->len;
- mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
- DMA_DIRECTION_TO_DEVICE);
+ io->dir = DMA_DIRECTION_TO_DEVICE;
+ io->dma_len = io->len;
+ io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
+ io->dir);
- qemu_iovec_add(&io->iov, mem, io->len);
+ qemu_iovec_add(&io->iov, io->dma_mem, io->len);
s->io_buffer_size -= io->len;
s->io_buffer_index += io->len;
io->len = 0;
@@ -285,6 +285,9 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
return;
done:
+ dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
+ io->dir, io->dma_len);
+
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);
} else {
@@ -351,6 +354,9 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
return;
done:
+ dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
+ io->dir, io->dma_len);
+
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
if (ret < 0) {
block_acct_failed(blk_get_stats(s->blk), &s->acct);