author     Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
committer  Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>  2023-10-10 11:40:56 +0000
commit     e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree       aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /hw/dma
parent     cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:
The epsilon release introduces a new compatibility layer which makes the virtio-loopback design work with QEMU and the rust-vmm vhost-user backend without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'hw/dma')
-rw-r--r--  hw/dma/Kconfig                 32
-rw-r--r--  hw/dma/bcm2835_dma.c          410
-rw-r--r--  hw/dma/etraxfs_dma.c          780
-rw-r--r--  hw/dma/i82374.c               168
-rw-r--r--  hw/dma/i8257.c                657
-rw-r--r--  hw/dma/meson.build             16
-rw-r--r--  hw/dma/omap_dma.c            2124
-rw-r--r--  hw/dma/pl080.c                449
-rw-r--r--  hw/dma/pl330.c               1702
-rw-r--r--  hw/dma/pxa2xx_dma.c           591
-rw-r--r--  hw/dma/rc4030.c               754
-rw-r--r--  hw/dma/sifive_pdma.c          351
-rw-r--r--  hw/dma/soc_dma.c              361
-rw-r--r--  hw/dma/sparc32_dma.c          449
-rw-r--r--  hw/dma/trace-events            46
-rw-r--r--  hw/dma/trace.h                  1
-rw-r--r--  hw/dma/xilinx_axidma.c        662
-rw-r--r--  hw/dma/xlnx-zdma.c            847
-rw-r--r--  hw/dma/xlnx-zynq-devcfg.c     402
-rw-r--r--  hw/dma/xlnx_csu_dma.c         743
-rw-r--r--  hw/dma/xlnx_dpdma.c           790
21 files changed, 12335 insertions, 0 deletions
diff --git a/hw/dma/Kconfig b/hw/dma/Kconfig
new file mode 100644
index 000000000..98fbb1bb0
--- /dev/null
+++ b/hw/dma/Kconfig
@@ -0,0 +1,32 @@
+config RC4030
+ bool
+
+config PL080
+ bool
+
+config PL330
+ bool
+
+config I82374
+ bool
+ select I8257
+
+config I8257
+ bool
+
+config ZYNQ_DEVCFG
+ bool
+ select REGISTER
+
+config XLNX_ZDMA
+ bool
+
+config STP2000
+ bool
+
+config SIFIVE_PDMA
+ bool
+
+config XLNX_CSU_DMA
+ bool
+ select REGISTER
diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c
new file mode 100644
index 000000000..eb0002a2b
--- /dev/null
+++ b/hw/dma/bcm2835_dma.c
@@ -0,0 +1,410 @@
+/*
+ * Raspberry Pi emulation (c) 2012 Gregory Estrade
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/dma/bcm2835_dma.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+/* DMA CS Control and Status bits */
+#define BCM2708_DMA_ACTIVE (1 << 0)
+#define BCM2708_DMA_END (1 << 1) /* GE */
+#define BCM2708_DMA_INT (1 << 2)
+#define BCM2708_DMA_ISPAUSED (1 << 4) /* Pause requested or not active */
+#define BCM2708_DMA_ISHELD (1 << 5) /* Is held by DREQ flow control */
+#define BCM2708_DMA_ERR (1 << 8)
+#define BCM2708_DMA_ABORT (1 << 30) /* stop current CB, go to next, WO */
+#define BCM2708_DMA_RESET (1 << 31) /* WO, self clearing */
+
+/* DMA control block "info" field bits */
+#define BCM2708_DMA_INT_EN (1 << 0)
+#define BCM2708_DMA_TDMODE (1 << 1)
+#define BCM2708_DMA_WAIT_RESP (1 << 3)
+#define BCM2708_DMA_D_INC (1 << 4)
+#define BCM2708_DMA_D_WIDTH (1 << 5)
+#define BCM2708_DMA_D_DREQ (1 << 6)
+#define BCM2708_DMA_D_IGNORE (1 << 7)
+#define BCM2708_DMA_S_INC (1 << 8)
+#define BCM2708_DMA_S_WIDTH (1 << 9)
+#define BCM2708_DMA_S_DREQ (1 << 10)
+#define BCM2708_DMA_S_IGNORE (1 << 11)
+
+/* Register offsets */
+#define BCM2708_DMA_CS 0x00 /* Control and Status */
+#define BCM2708_DMA_ADDR 0x04 /* Control block address */
+/* the current control block appears in the following registers - read only */
+#define BCM2708_DMA_INFO 0x08
+#define BCM2708_DMA_SOURCE_AD 0x0c
+#define BCM2708_DMA_DEST_AD 0x10
+#define BCM2708_DMA_TXFR_LEN 0x14
+#define BCM2708_DMA_STRIDE 0x18
+#define BCM2708_DMA_NEXTCB 0x1C
+#define BCM2708_DMA_DEBUG 0x20
+
+#define BCM2708_DMA_INT_STATUS 0xfe0 /* Interrupt status of each channel */
+#define BCM2708_DMA_ENABLE 0xff0 /* Global enable bits for each channel */
+
+#define BCM2708_DMA_CS_RW_MASK 0x30ff0001 /* All RW bits in DMA_CS */
+
+static void bcm2835_dma_update(BCM2835DMAState *s, unsigned c)
+{
+ BCM2835DMAChan *ch = &s->chan[c];
+ uint32_t data, xlen, xlen_td, ylen;
+ int16_t dst_stride, src_stride;
+
+ if (!(s->enable & (1 << c))) {
+ return;
+ }
+
+ while ((s->enable & (1 << c)) && (ch->conblk_ad != 0)) {
+ /* CB fetch */
+ ch->ti = ldl_le_phys(&s->dma_as, ch->conblk_ad);
+ ch->source_ad = ldl_le_phys(&s->dma_as, ch->conblk_ad + 4);
+ ch->dest_ad = ldl_le_phys(&s->dma_as, ch->conblk_ad + 8);
+ ch->txfr_len = ldl_le_phys(&s->dma_as, ch->conblk_ad + 12);
+ ch->stride = ldl_le_phys(&s->dma_as, ch->conblk_ad + 16);
+ ch->nextconbk = ldl_le_phys(&s->dma_as, ch->conblk_ad + 20);
+
+ ylen = 1;
+ if (ch->ti & BCM2708_DMA_TDMODE) {
+ /* 2D transfer mode */
+ ylen += (ch->txfr_len >> 16) & 0x3fff;
+ xlen = ch->txfr_len & 0xffff;
+ dst_stride = ch->stride >> 16;
+ src_stride = ch->stride & 0xffff;
+ } else {
+ xlen = ch->txfr_len;
+ dst_stride = 0;
+ src_stride = 0;
+ }
+ xlen_td = xlen;
+
+ while (ylen != 0) {
+ /* Normal transfer mode */
+ while (xlen != 0) {
+ if (ch->ti & BCM2708_DMA_S_IGNORE) {
+ /* Ignore reads */
+ data = 0;
+ } else {
+ data = ldl_le_phys(&s->dma_as, ch->source_ad);
+ }
+ if (ch->ti & BCM2708_DMA_S_INC) {
+ ch->source_ad += 4;
+ }
+
+ if (ch->ti & BCM2708_DMA_D_IGNORE) {
+ /* Ignore writes */
+ } else {
+ stl_le_phys(&s->dma_as, ch->dest_ad, data);
+ }
+ if (ch->ti & BCM2708_DMA_D_INC) {
+ ch->dest_ad += 4;
+ }
+
+ /* update remaining transfer length */
+ xlen -= 4;
+ if (ch->ti & BCM2708_DMA_TDMODE) {
+ ch->txfr_len = (ylen << 16) | xlen;
+ } else {
+ ch->txfr_len = xlen;
+ }
+ }
+
+ if (--ylen != 0) {
+ ch->source_ad += src_stride;
+ ch->dest_ad += dst_stride;
+ xlen = xlen_td;
+ }
+ }
+ ch->cs |= BCM2708_DMA_END;
+ if (ch->ti & BCM2708_DMA_INT_EN) {
+ ch->cs |= BCM2708_DMA_INT;
+ s->int_status |= (1 << c);
+ qemu_set_irq(ch->irq, 1);
+ }
+
+ /* Process next CB */
+ ch->conblk_ad = ch->nextconbk;
+ }
+
+ ch->cs &= ~BCM2708_DMA_ACTIVE;
+ ch->cs |= BCM2708_DMA_ISPAUSED;
+}
+
+static void bcm2835_dma_chan_reset(BCM2835DMAChan *ch)
+{
+ ch->cs = 0;
+ ch->conblk_ad = 0;
+}
+
+static uint64_t bcm2835_dma_read(BCM2835DMAState *s, hwaddr offset,
+ unsigned size, unsigned c)
+{
+ BCM2835DMAChan *ch;
+ uint32_t res = 0;
+
+ assert(size == 4);
+ assert(c < BCM2835_DMA_NCHANS);
+
+ ch = &s->chan[c];
+
+ switch (offset) {
+ case BCM2708_DMA_CS:
+ res = ch->cs;
+ break;
+ case BCM2708_DMA_ADDR:
+ res = ch->conblk_ad;
+ break;
+ case BCM2708_DMA_INFO:
+ res = ch->ti;
+ break;
+ case BCM2708_DMA_SOURCE_AD:
+ res = ch->source_ad;
+ break;
+ case BCM2708_DMA_DEST_AD:
+ res = ch->dest_ad;
+ break;
+ case BCM2708_DMA_TXFR_LEN:
+ res = ch->txfr_len;
+ break;
+ case BCM2708_DMA_STRIDE:
+ res = ch->stride;
+ break;
+ case BCM2708_DMA_NEXTCB:
+ res = ch->nextconbk;
+ break;
+ case BCM2708_DMA_DEBUG:
+ res = ch->debug;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n",
+ __func__, offset);
+ break;
+ }
+ return res;
+}
+
+static void bcm2835_dma_write(BCM2835DMAState *s, hwaddr offset,
+ uint64_t value, unsigned size, unsigned c)
+{
+ BCM2835DMAChan *ch;
+ uint32_t oldcs;
+
+ assert(size == 4);
+ assert(c < BCM2835_DMA_NCHANS);
+
+ ch = &s->chan[c];
+
+ switch (offset) {
+ case BCM2708_DMA_CS:
+ oldcs = ch->cs;
+ if (value & BCM2708_DMA_RESET) {
+ bcm2835_dma_chan_reset(ch);
+ }
+ if (value & BCM2708_DMA_ABORT) {
+ /* abort is a no-op, since we always run to completion */
+ }
+ if (value & BCM2708_DMA_END) {
+ ch->cs &= ~BCM2708_DMA_END;
+ }
+ if (value & BCM2708_DMA_INT) {
+ ch->cs &= ~BCM2708_DMA_INT;
+ s->int_status &= ~(1 << c);
+ qemu_set_irq(ch->irq, 0);
+ }
+ ch->cs &= ~BCM2708_DMA_CS_RW_MASK;
+ ch->cs |= (value & BCM2708_DMA_CS_RW_MASK);
+ if (!(oldcs & BCM2708_DMA_ACTIVE) && (ch->cs & BCM2708_DMA_ACTIVE)) {
+ bcm2835_dma_update(s, c);
+ }
+ break;
+ case BCM2708_DMA_ADDR:
+ ch->conblk_ad = value;
+ break;
+ case BCM2708_DMA_DEBUG:
+ ch->debug = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n",
+ __func__, offset);
+ break;
+ }
+}
+
+static uint64_t bcm2835_dma0_read(void *opaque, hwaddr offset, unsigned size)
+{
+ BCM2835DMAState *s = opaque;
+
+ if (offset < 0xf00) {
+ return bcm2835_dma_read(s, (offset & 0xff), size, (offset >> 8) & 0xf);
+ } else {
+ switch (offset) {
+ case BCM2708_DMA_INT_STATUS:
+ return s->int_status;
+ case BCM2708_DMA_ENABLE:
+ return s->enable;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n",
+ __func__, offset);
+ return 0;
+ }
+ }
+}
+
+static uint64_t bcm2835_dma15_read(void *opaque, hwaddr offset, unsigned size)
+{
+ return bcm2835_dma_read(opaque, (offset & 0xff), size, 15);
+}
+
+static void bcm2835_dma0_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ BCM2835DMAState *s = opaque;
+
+ if (offset < 0xf00) {
+ bcm2835_dma_write(s, (offset & 0xff), value, size, (offset >> 8) & 0xf);
+ } else {
+ switch (offset) {
+ case BCM2708_DMA_INT_STATUS:
+ break;
+ case BCM2708_DMA_ENABLE:
+ s->enable = (value & 0xffff);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n",
+ __func__, offset);
+ }
+ }
+
+}
+
+static void bcm2835_dma15_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ bcm2835_dma_write(opaque, (offset & 0xff), value, size, 15);
+}
+
+static const MemoryRegionOps bcm2835_dma0_ops = {
+ .read = bcm2835_dma0_read,
+ .write = bcm2835_dma0_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const MemoryRegionOps bcm2835_dma15_ops = {
+ .read = bcm2835_dma15_read,
+ .write = bcm2835_dma15_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_bcm2835_dma_chan = {
+ .name = TYPE_BCM2835_DMA "-chan",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(cs, BCM2835DMAChan),
+ VMSTATE_UINT32(conblk_ad, BCM2835DMAChan),
+ VMSTATE_UINT32(ti, BCM2835DMAChan),
+ VMSTATE_UINT32(source_ad, BCM2835DMAChan),
+ VMSTATE_UINT32(dest_ad, BCM2835DMAChan),
+ VMSTATE_UINT32(txfr_len, BCM2835DMAChan),
+ VMSTATE_UINT32(stride, BCM2835DMAChan),
+ VMSTATE_UINT32(nextconbk, BCM2835DMAChan),
+ VMSTATE_UINT32(debug, BCM2835DMAChan),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_bcm2835_dma = {
+ .name = TYPE_BCM2835_DMA,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(chan, BCM2835DMAState, BCM2835_DMA_NCHANS, 1,
+ vmstate_bcm2835_dma_chan, BCM2835DMAChan),
+ VMSTATE_UINT32(int_status, BCM2835DMAState),
+ VMSTATE_UINT32(enable, BCM2835DMAState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void bcm2835_dma_init(Object *obj)
+{
+ BCM2835DMAState *s = BCM2835_DMA(obj);
+ int n;
+
+ /* DMA channels 0-14 occupy a contiguous block of IO memory, along
+ * with the global enable and interrupt status bits. Channel 15
+ * has the same register map, but is mapped at a discontiguous
+ * address in a separate IO block.
+ */
+ memory_region_init_io(&s->iomem0, OBJECT(s), &bcm2835_dma0_ops, s,
+ TYPE_BCM2835_DMA, 0x1000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem0);
+
+ memory_region_init_io(&s->iomem15, OBJECT(s), &bcm2835_dma15_ops, s,
+ TYPE_BCM2835_DMA "-chan15", 0x100);
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem15);
+
+ for (n = 0; n < 16; n++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->chan[n].irq);
+ }
+}
+
+static void bcm2835_dma_reset(DeviceState *dev)
+{
+ BCM2835DMAState *s = BCM2835_DMA(dev);
+ int n;
+
+ s->enable = 0xffff;
+ s->int_status = 0;
+ for (n = 0; n < BCM2835_DMA_NCHANS; n++) {
+ bcm2835_dma_chan_reset(&s->chan[n]);
+ }
+}
+
+static void bcm2835_dma_realize(DeviceState *dev, Error **errp)
+{
+ BCM2835DMAState *s = BCM2835_DMA(dev);
+ Object *obj;
+
+ obj = object_property_get_link(OBJECT(dev), "dma-mr", &error_abort);
+ s->dma_mr = MEMORY_REGION(obj);
+ address_space_init(&s->dma_as, s->dma_mr, TYPE_BCM2835_DMA "-memory");
+
+ bcm2835_dma_reset(dev);
+}
+
+static void bcm2835_dma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = bcm2835_dma_realize;
+ dc->reset = bcm2835_dma_reset;
+ dc->vmsd = &vmstate_bcm2835_dma;
+}
+
+static TypeInfo bcm2835_dma_info = {
+ .name = TYPE_BCM2835_DMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(BCM2835DMAState),
+ .class_init = bcm2835_dma_class_init,
+ .instance_init = bcm2835_dma_init,
+};
+
+static void bcm2835_dma_register_types(void)
+{
+ type_register_static(&bcm2835_dma_info);
+}
+
+type_init(bcm2835_dma_register_types)
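
The update loop above implements the BCM2835 scatter-gather model: each
transfer is described by a six-word control block (CB) that the channel
fetches from guest memory at conblk_ad, and a zero next-CB pointer ends the
chain. A minimal guest-side sketch of programming one linear copy against
this model follows; the struct layout mirrors the ldl_le_phys() offsets in
bcm2835_dma_update(), while mmio_write() and chan_base are hypothetical
stand-ins for whatever register access the guest uses.

    #include <stdint.h>

    /* Six-word control block, matching the offsets fetched in
     * bcm2835_dma_update(); real hardware wants 256-bit alignment. */
    struct bcm2835_dma_cb {
        uint32_t ti;         /* transfer info ("info" field bits above)  */
        uint32_t source_ad;  /* source address                           */
        uint32_t dest_ad;    /* destination address                      */
        uint32_t txfr_len;   /* length in bytes (a multiple of 4 here,
                                since the model moves 4 bytes per step)  */
        uint32_t stride;     /* 2D stride; unused for linear transfers   */
        uint32_t nextconbk;  /* next CB address; 0 terminates the chain  */
    } __attribute__((aligned(32)));

    /* Hypothetical 32-bit MMIO write helper supplied by the guest. */
    void mmio_write(uint32_t addr, uint32_t val);

    /* chan_base: channel n sits at offset n * 0x100 of the first MMIO
     * region, as decoded by bcm2835_dma0_read()/bcm2835_dma0_write(). */
    void start_linear_copy(uint32_t chan_base, struct bcm2835_dma_cb *cb,
                           uint32_t src, uint32_t dst, uint32_t len)
    {
        cb->ti        = (1 << 8) | (1 << 4);  /* S_INC | D_INC */
        cb->source_ad = src;
        cb->dest_ad   = dst;
        cb->txfr_len  = len;
        cb->stride    = 0;
        cb->nextconbk = 0;                    /* single CB, no chaining */

        mmio_write(chan_base + 0x04, (uint32_t)(uintptr_t)cb); /* DMA_ADDR  */
        mmio_write(chan_base + 0x00, 1 << 0);                  /* CS ACTIVE */
    }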
diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c
new file mode 100644
index 000000000..c4334e87b
--- /dev/null
+++ b/hw/dma/etraxfs_dma.c
@@ -0,0 +1,780 @@
+/*
+ * QEMU ETRAX DMA Controller.
+ *
+ * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "qemu/main-loop.h"
+#include "sysemu/runstate.h"
+#include "exec/address-spaces.h"
+
+#include "hw/cris/etraxfs_dma.h"
+
+#define D(x)
+
+#define RW_DATA (0x0 / 4)
+#define RW_SAVED_DATA (0x58 / 4)
+#define RW_SAVED_DATA_BUF (0x5c / 4)
+#define RW_GROUP (0x60 / 4)
+#define RW_GROUP_DOWN (0x7c / 4)
+#define RW_CMD (0x80 / 4)
+#define RW_CFG (0x84 / 4)
+#define RW_STAT (0x88 / 4)
+#define RW_INTR_MASK (0x8c / 4)
+#define RW_ACK_INTR (0x90 / 4)
+#define R_INTR (0x94 / 4)
+#define R_MASKED_INTR (0x98 / 4)
+#define RW_STREAM_CMD (0x9c / 4)
+
+#define DMA_REG_MAX (0x100 / 4)
+
+/* descriptors */
+
+// ------------------------------------------------------------ dma_descr_group
+typedef struct dma_descr_group {
+ uint32_t next;
+ unsigned eol : 1;
+ unsigned tol : 1;
+ unsigned bol : 1;
+ unsigned : 1;
+ unsigned intr : 1;
+ unsigned : 2;
+ unsigned en : 1;
+ unsigned : 7;
+ unsigned dis : 1;
+ unsigned md : 16;
+ struct dma_descr_group *up;
+ union {
+ struct dma_descr_context *context;
+ struct dma_descr_group *group;
+ } down;
+} dma_descr_group;
+
+// ---------------------------------------------------------- dma_descr_context
+typedef struct dma_descr_context {
+ uint32_t next;
+ unsigned eol : 1;
+ unsigned : 3;
+ unsigned intr : 1;
+ unsigned : 1;
+ unsigned store_mode : 1;
+ unsigned en : 1;
+ unsigned : 7;
+ unsigned dis : 1;
+ unsigned md0 : 16;
+ unsigned md1;
+ unsigned md2;
+ unsigned md3;
+ unsigned md4;
+ uint32_t saved_data;
+ uint32_t saved_data_buf;
+} dma_descr_context;
+
+// ------------------------------------------------------------- dma_descr_data
+typedef struct dma_descr_data {
+ uint32_t next;
+ uint32_t buf;
+ unsigned eol : 1;
+ unsigned : 2;
+ unsigned out_eop : 1;
+ unsigned intr : 1;
+ unsigned wait : 1;
+ unsigned : 2;
+ unsigned : 3;
+ unsigned in_eop : 1;
+ unsigned : 4;
+ unsigned md : 16;
+ uint32_t after;
+} dma_descr_data;
+
+/* Constants */
+enum {
+ regk_dma_ack_pkt = 0x00000100,
+ regk_dma_anytime = 0x00000001,
+ regk_dma_array = 0x00000008,
+ regk_dma_burst = 0x00000020,
+ regk_dma_client = 0x00000002,
+ regk_dma_copy_next = 0x00000010,
+ regk_dma_copy_up = 0x00000020,
+ regk_dma_data_at_eol = 0x00000001,
+ regk_dma_dis_c = 0x00000010,
+ regk_dma_dis_g = 0x00000020,
+ regk_dma_idle = 0x00000001,
+ regk_dma_intern = 0x00000004,
+ regk_dma_load_c = 0x00000200,
+ regk_dma_load_c_n = 0x00000280,
+ regk_dma_load_c_next = 0x00000240,
+ regk_dma_load_d = 0x00000140,
+ regk_dma_load_g = 0x00000300,
+ regk_dma_load_g_down = 0x000003c0,
+ regk_dma_load_g_next = 0x00000340,
+ regk_dma_load_g_up = 0x00000380,
+ regk_dma_next_en = 0x00000010,
+ regk_dma_next_pkt = 0x00000010,
+ regk_dma_no = 0x00000000,
+ regk_dma_only_at_wait = 0x00000000,
+ regk_dma_restore = 0x00000020,
+ regk_dma_rst = 0x00000001,
+ regk_dma_running = 0x00000004,
+ regk_dma_rw_cfg_default = 0x00000000,
+ regk_dma_rw_cmd_default = 0x00000000,
+ regk_dma_rw_intr_mask_default = 0x00000000,
+ regk_dma_rw_stat_default = 0x00000101,
+ regk_dma_rw_stream_cmd_default = 0x00000000,
+ regk_dma_save_down = 0x00000020,
+ regk_dma_save_up = 0x00000020,
+ regk_dma_set_reg = 0x00000050,
+ regk_dma_set_w_size1 = 0x00000190,
+ regk_dma_set_w_size2 = 0x000001a0,
+ regk_dma_set_w_size4 = 0x000001c0,
+ regk_dma_stopped = 0x00000002,
+ regk_dma_store_c = 0x00000002,
+ regk_dma_store_descr = 0x00000000,
+ regk_dma_store_g = 0x00000004,
+ regk_dma_store_md = 0x00000001,
+ regk_dma_sw = 0x00000008,
+ regk_dma_update_down = 0x00000020,
+ regk_dma_yes = 0x00000001
+};
+
+enum dma_ch_state
+{
+ RST = 1,
+ STOPPED = 2,
+ RUNNING = 4
+};
+
+struct fs_dma_channel
+{
+ qemu_irq irq;
+ struct etraxfs_dma_client *client;
+
+ /* Internal status. */
+ int stream_cmd_src;
+ enum dma_ch_state state;
+
+ unsigned int input : 1;
+ unsigned int eol : 1;
+
+ struct dma_descr_group current_g;
+ struct dma_descr_context current_c;
+ struct dma_descr_data current_d;
+
+ /* Control registers. */
+ uint32_t regs[DMA_REG_MAX];
+};
+
+struct fs_dma_ctrl
+{
+ MemoryRegion mmio;
+ int nr_channels;
+ struct fs_dma_channel *channels;
+
+ QEMUBH *bh;
+};
+
+static void DMA_run(void *opaque);
+static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);
+
+static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
+{
+ return ctrl->channels[c].regs[reg];
+}
+
+static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
+{
+ return channel_reg(ctrl, c, RW_CFG) & 2;
+}
+
+static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
+{
+ return (channel_reg(ctrl, c, RW_CFG) & 1)
+ && ctrl->channels[c].client;
+}
+
+static inline int fs_channel(hwaddr addr)
+{
+ /* Every channel has a 0x2000 ctrl register map. */
+ return addr >> 13;
+}
+
+#ifdef USE_THIS_DEAD_CODE
+static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
+{
+ hwaddr addr = channel_reg(ctrl, c, RW_GROUP);
+
+ /* Load and decode. FIXME: handle endianness. */
+ cpu_physical_memory_read(addr, &ctrl->channels[c].current_g,
+ sizeof(ctrl->channels[c].current_g));
+}
+
+static void dump_c(int ch, struct dma_descr_context *c)
+{
+ printf("%s ch=%d\n", __func__, ch);
+ printf("next=%x\n", c->next);
+ printf("saved_data=%x\n", c->saved_data);
+ printf("saved_data_buf=%x\n", c->saved_data_buf);
+ printf("eol=%x\n", (uint32_t) c->eol);
+}
+
+static void dump_d(int ch, struct dma_descr_data *d)
+{
+ printf("%s ch=%d\n", __func__, ch);
+ printf("next=%x\n", d->next);
+ printf("buf=%x\n", d->buf);
+ printf("after=%x\n", d->after);
+ printf("intr=%x\n", (uint32_t) d->intr);
+ printf("out_eop=%x\n", (uint32_t) d->out_eop);
+ printf("in_eop=%x\n", (uint32_t) d->in_eop);
+ printf("eol=%x\n", (uint32_t) d->eol);
+}
+#endif
+
+static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
+{
+ hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+
+ /* Load and decode. FIXME: handle endianness. */
+ cpu_physical_memory_read(addr, &ctrl->channels[c].current_c,
+ sizeof(ctrl->channels[c].current_c));
+
+ D(dump_c(c, &ctrl->channels[c].current_c));
+ /* I guess this should update the current pos. */
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
+}
+
+static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
+{
+ hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+
+ /* Load and decode. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+ cpu_physical_memory_read(addr, &ctrl->channels[c].current_d,
+ sizeof(ctrl->channels[c].current_d));
+
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ ctrl->channels[c].regs[RW_DATA] = addr;
+}
+
+static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
+{
+ hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+
+ /* Encode and store. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ cpu_physical_memory_write(addr, &ctrl->channels[c].current_c,
+ sizeof(ctrl->channels[c].current_c));
+}
+
+static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
+{
+ hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);
+
+ /* Encode and store. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
+ cpu_physical_memory_write(addr, &ctrl->channels[c].current_d,
+ sizeof(ctrl->channels[c].current_d));
+}
+
+static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
+{
+ /* FIXME: */
+}
+
+static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
+{
+ if (ctrl->channels[c].client)
+ {
+ ctrl->channels[c].eol = 0;
+ ctrl->channels[c].state = RUNNING;
+ if (!ctrl->channels[c].input)
+ channel_out_run(ctrl, c);
+ } else
+ printf("WARNING: starting DMA ch %d with no client\n", c);
+
+ qemu_bh_schedule_idle(ctrl->bh);
+}
+
+static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
+{
+ if (!channel_en(ctrl, c)
+ || channel_stopped(ctrl, c)
+ || ctrl->channels[c].state != RUNNING
+ /* Only reload the current data descriptor if it has eol set. */
+ || !ctrl->channels[c].current_d.eol) {
+ D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
+ c, ctrl->channels[c].state,
+ channel_stopped(ctrl, c),
+ channel_en(ctrl,c),
+ ctrl->channels[c].eol));
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ return;
+ }
+
+ /* Reload the current descriptor. */
+ channel_load_d(ctrl, c);
+
+ /* If the current descriptor cleared the eol flag and we had already
+ reached eol state, do the continue. */
+ if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
+ D(printf("continue %d ok %x\n", c,
+ ctrl->channels[c].current_d.next));
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
+ channel_load_d(ctrl, c);
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
+
+ channel_start(ctrl, c);
+ }
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
+}
+
+static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
+{
+ unsigned int cmd = v & ((1 << 10) - 1);
+
+ D(printf("%s ch=%d cmd=%x\n",
+ __func__, c, cmd));
+ if (cmd & regk_dma_load_d) {
+ channel_load_d(ctrl, c);
+ if (cmd & regk_dma_burst)
+ channel_start(ctrl, c);
+ }
+
+ if (cmd & regk_dma_load_c) {
+ channel_load_c(ctrl, c);
+ }
+}
+
+static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
+{
+ D(printf("%s %d\n", __func__, c));
+ ctrl->channels[c].regs[R_INTR] &=
+ ~(ctrl->channels[c].regs[RW_ACK_INTR]);
+
+ ctrl->channels[c].regs[R_MASKED_INTR] =
+ ctrl->channels[c].regs[R_INTR]
+ & ctrl->channels[c].regs[RW_INTR_MASK];
+
+ D(printf("%s: chan=%d masked_intr=%x\n", __func__,
+ c,
+ ctrl->channels[c].regs[R_MASKED_INTR]));
+
+ qemu_set_irq(ctrl->channels[c].irq,
+ !!ctrl->channels[c].regs[R_MASKED_INTR]);
+}
+
+static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
+{
+ uint32_t len;
+ uint32_t saved_data_buf;
+ unsigned char buf[2 * 1024];
+
+ struct dma_context_metadata meta;
+ bool send_context = true;
+
+ if (ctrl->channels[c].eol)
+ return 0;
+
+ do {
+ bool out_eop;
+ D(printf("ch=%d buf=%x after=%x\n",
+ c,
+ (uint32_t)ctrl->channels[c].current_d.buf,
+ (uint32_t)ctrl->channels[c].current_d.after));
+
+ if (send_context) {
+ if (ctrl->channels[c].client->client.metadata_push) {
+ meta.metadata = ctrl->channels[c].current_d.md;
+ ctrl->channels[c].client->client.metadata_push(
+ ctrl->channels[c].client->client.opaque,
+ &meta);
+ }
+ send_context = false;
+ }
+
+ channel_load_d(ctrl, c);
+ saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+ len = (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.after;
+ len -= saved_data_buf;
+
+ if (len > sizeof buf)
+ len = sizeof buf;
+ cpu_physical_memory_read (saved_data_buf, buf, len);
+
+ out_eop = ((saved_data_buf + len) ==
+ ctrl->channels[c].current_d.after) &&
+ ctrl->channels[c].current_d.out_eop;
+
+ D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
+ saved_data_buf, len, out_eop));
+
+ if (ctrl->channels[c].client->client.push) {
+ if (len > 0) {
+ ctrl->channels[c].client->client.push(
+ ctrl->channels[c].client->client.opaque,
+ buf, len, out_eop);
+ }
+ } else {
+ printf("WARNING: DMA ch%d dataloss,"
+ " no attached client.\n", c);
+ }
+
+ saved_data_buf += len;
+
+ if (saved_data_buf == (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.after) {
+ /* Done. Step to next. */
+ if (ctrl->channels[c].current_d.out_eop) {
+ send_context = true;
+ }
+ if (ctrl->channels[c].current_d.intr) {
+ /* data intr. */
+ D(printf("signal intr %d eol=%d\n",
+ len, ctrl->channels[c].current_d.eol));
+ ctrl->channels[c].regs[R_INTR] |= (1 << 2);
+ channel_update_irq(ctrl, c);
+ }
+ channel_store_d(ctrl, c);
+ if (ctrl->channels[c].current_d.eol) {
+ D(printf("channel %d EOL\n", c));
+ ctrl->channels[c].eol = 1;
+
+ /* Mark the context as disabled. */
+ ctrl->channels[c].current_c.dis = 1;
+ channel_store_c(ctrl, c);
+
+ channel_stop(ctrl, c);
+ } else {
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->
+ channels[c].current_d.next;
+ /* Load new descriptor. */
+ channel_load_d(ctrl, c);
+ saved_data_buf = (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.buf;
+ }
+
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ saved_data_buf;
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ }
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+ } while (!ctrl->channels[c].eol);
+ return 1;
+}
+
+static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
+ unsigned char *buf, int buflen, int eop)
+{
+ uint32_t len;
+ uint32_t saved_data_buf;
+
+ if (ctrl->channels[c].eol == 1)
+ return 0;
+
+ channel_load_d(ctrl, c);
+ saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
+ len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
+ len -= saved_data_buf;
+
+ if (len > buflen)
+ len = buflen;
+
+ cpu_physical_memory_write (saved_data_buf, buf, len);
+ saved_data_buf += len;
+
+ if (saved_data_buf ==
+ (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
+ || eop) {
+ uint32_t r_intr = ctrl->channels[c].regs[R_INTR];
+
+ D(printf("in dscr end len=%d\n",
+ ctrl->channels[c].current_d.after
+ - ctrl->channels[c].current_d.buf));
+ ctrl->channels[c].current_d.after = saved_data_buf;
+
+ /* Done. Step to next. */
+ if (ctrl->channels[c].current_d.intr) {
+ /* TODO: signal eop to the client. */
+ /* data intr. */
+ ctrl->channels[c].regs[R_INTR] |= 3;
+ }
+ if (eop) {
+ ctrl->channels[c].current_d.in_eop = 1;
+ ctrl->channels[c].regs[R_INTR] |= 8;
+ }
+ if (r_intr != ctrl->channels[c].regs[R_INTR])
+ channel_update_irq(ctrl, c);
+
+ channel_store_d(ctrl, c);
+ D(dump_d(c, &ctrl->channels[c].current_d));
+
+ if (ctrl->channels[c].current_d.eol) {
+ D(printf("channel %d EOL\n", c));
+ ctrl->channels[c].eol = 1;
+
+ /* Mark the context as disabled. */
+ ctrl->channels[c].current_c.dis = 1;
+ channel_store_c(ctrl, c);
+
+ channel_stop(ctrl, c);
+ } else {
+ ctrl->channels[c].regs[RW_SAVED_DATA] =
+ (uint32_t)(unsigned long)ctrl->
+ channels[c].current_d.next;
+ /* Load new descriptor. */
+ channel_load_d(ctrl, c);
+ saved_data_buf = (uint32_t)(unsigned long)
+ ctrl->channels[c].current_d.buf;
+ }
+ }
+
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
+ return len;
+}
+
+static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
+{
+ if (ctrl->channels[c].client->client.pull) {
+ ctrl->channels[c].client->client.pull(
+ ctrl->channels[c].client->client.opaque);
+ return 1;
+ } else
+ return 0;
+}
+
+static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
+{
+ hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
+ return 0;
+}
+
+static uint64_t
+dma_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ struct fs_dma_ctrl *ctrl = opaque;
+ int c;
+ uint32_t r = 0;
+
+ if (size != 4) {
+ dma_rinvalid(opaque, addr);
+ }
+
+ /* Make addr relative to this channel and bounded to nr regs. */
+ c = fs_channel(addr);
+ addr &= 0xff;
+ addr >>= 2;
+ switch (addr)
+ {
+ case RW_STAT:
+ r = ctrl->channels[c].state & 7;
+ r |= ctrl->channels[c].eol << 5;
+ r |= ctrl->channels[c].stream_cmd_src << 8;
+ break;
+
+ default:
+ r = ctrl->channels[c].regs[addr];
+ D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n",
+ __func__, c, addr));
+ break;
+ }
+ return r;
+}
+
+static void
+dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
+{
+ hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
+}
+
+static void
+dma_update_state(struct fs_dma_ctrl *ctrl, int c)
+{
+ if (ctrl->channels[c].regs[RW_CFG] & 2)
+ ctrl->channels[c].state = STOPPED;
+ if (!(ctrl->channels[c].regs[RW_CFG] & 1))
+ ctrl->channels[c].state = RST;
+}
+
+static void
+dma_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ struct fs_dma_ctrl *ctrl = opaque;
+ uint32_t value = val64;
+ int c;
+
+ if (size != 4) {
+ dma_winvalid(opaque, addr, value);
+ }
+
+ /* Make addr relative to this channel and bounded to nr regs. */
+ c = fs_channel(addr);
+ addr &= 0xff;
+ addr >>= 2;
+ switch (addr)
+ {
+ case RW_DATA:
+ ctrl->channels[c].regs[addr] = value;
+ break;
+
+ case RW_CFG:
+ ctrl->channels[c].regs[addr] = value;
+ dma_update_state(ctrl, c);
+ break;
+ case RW_CMD:
+ /* continue. */
+ if (value & ~1)
+ printf("Invalid store to ch=%d RW_CMD %x\n",
+ c, value);
+ ctrl->channels[c].regs[addr] = value;
+ channel_continue(ctrl, c);
+ break;
+
+ case RW_SAVED_DATA:
+ case RW_SAVED_DATA_BUF:
+ case RW_GROUP:
+ case RW_GROUP_DOWN:
+ ctrl->channels[c].regs[addr] = value;
+ break;
+
+ case RW_ACK_INTR:
+ case RW_INTR_MASK:
+ ctrl->channels[c].regs[addr] = value;
+ channel_update_irq(ctrl, c);
+ if (addr == RW_ACK_INTR)
+ ctrl->channels[c].regs[RW_ACK_INTR] = 0;
+ break;
+
+ case RW_STREAM_CMD:
+ if (value & ~1023)
+ printf("Invalid store to ch=%d "
+ "RW_STREAMCMD %x\n",
+ c, value);
+ ctrl->channels[c].regs[addr] = value;
+ D(printf("stream_cmd ch=%d\n", c));
+ channel_stream_cmd(ctrl, c, value);
+ break;
+
+ default:
+ D(printf ("%s c=%d " TARGET_FMT_plx "\n",
+ __func__, c, addr));
+ break;
+ }
+}
+
+static const MemoryRegionOps dma_ops = {
+ .read = dma_read,
+ .write = dma_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4
+ }
+};
+
+static int etraxfs_dmac_run(void *opaque)
+{
+ struct fs_dma_ctrl *ctrl = opaque;
+ int i;
+ int p = 0;
+
+ for (i = 0;
+ i < ctrl->nr_channels;
+ i++)
+ {
+ if (ctrl->channels[i].state == RUNNING)
+ {
+ if (ctrl->channels[i].input) {
+ p += channel_in_run(ctrl, i);
+ } else {
+ p += channel_out_run(ctrl, i);
+ }
+ }
+ }
+ return p;
+}
+
+int etraxfs_dmac_input(struct etraxfs_dma_client *client,
+ void *buf, int len, int eop)
+{
+ return channel_in_process(client->ctrl, client->channel,
+ buf, len, eop);
+}
+
+/* Connect an IRQ line with a channel. */
+void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
+{
+ struct fs_dma_ctrl *ctrl = opaque;
+ ctrl->channels[c].irq = *line;
+ ctrl->channels[c].input = input;
+}
+
+void etraxfs_dmac_connect_client(void *opaque, int c,
+ struct etraxfs_dma_client *cl)
+{
+ struct fs_dma_ctrl *ctrl = opaque;
+ cl->ctrl = ctrl;
+ cl->channel = c;
+ ctrl->channels[c].client = cl;
+}
+
+
+static void DMA_run(void *opaque)
+{
+ struct fs_dma_ctrl *etraxfs_dmac = opaque;
+ int p = 1;
+
+ if (runstate_is_running())
+ p = etraxfs_dmac_run(etraxfs_dmac);
+
+ if (p)
+ qemu_bh_schedule_idle(etraxfs_dmac->bh);
+}
+
+void *etraxfs_dmac_init(hwaddr base, int nr_channels)
+{
+ struct fs_dma_ctrl *ctrl = NULL;
+
+ ctrl = g_malloc0(sizeof *ctrl);
+
+ ctrl->bh = qemu_bh_new(DMA_run, ctrl);
+
+ ctrl->nr_channels = nr_channels;
+ ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);
+
+ memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
+ nr_channels * 0x2000);
+ memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);
+
+ return ctrl;
+}
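
The controller above is wired up through a small C API rather than qdev:
etraxfs_dmac_init() maps the register block, etraxfs_dmac_connect() attaches
an IRQ line and the channel direction, and etraxfs_dmac_connect_client()
hooks a device model whose push/pull callbacks are driven from
channel_out_run()/channel_in_run(). A sketch of attaching an output client
follows; the callback signature is inferred from the call sites above (the
authoritative definition lives in hw/cris/etraxfs_dma.h, which is not part
of this diff), and my_push()/my_dev/the base address are hypothetical.

    /* Inferred from channel_out_run(): the channel streams `len` bytes to
     * the client and flags end-of-packet on the last descriptor. */
    static void my_push(void *opaque, unsigned char *buf, int len, bool eop)
    {
        /* consume the data on behalf of the attached device model */
    }

    static struct etraxfs_dma_client my_client;

    void wire_output_channel(qemu_irq irq, void *my_dev)
    {
        void *dmac = etraxfs_dmac_init(0x30000000, 10); /* base, channels */

        my_client.client.push   = my_push;
        my_client.client.opaque = my_dev;

        etraxfs_dmac_connect(dmac, 0, &irq, /* input= */ 0);
        etraxfs_dmac_connect_client(dmac, 0, &my_client);
    }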
diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c
new file mode 100644
index 000000000..34c3aaf7d
--- /dev/null
+++ b/hw/dma/i82374.c
@@ -0,0 +1,168 @@
+/*
+ * QEMU Intel 82374 emulation (Enhanced DMA controller)
+ *
+ * Copyright (c) 2010 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/isa/isa.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "hw/dma/i8257.h"
+#include "qom/object.h"
+
+#define TYPE_I82374 "i82374"
+OBJECT_DECLARE_SIMPLE_TYPE(I82374State, I82374)
+
+//#define DEBUG_I82374
+
+#ifdef DEBUG_I82374
+#define DPRINTF(fmt, ...) \
+do { fprintf(stderr, "i82374: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+do {} while (0)
+#endif
+#define BADF(fmt, ...) \
+do { fprintf(stderr, "i82374 ERROR: " fmt , ## __VA_ARGS__); } while (0)
+
+struct I82374State {
+ ISADevice parent_obj;
+
+ uint32_t iobase;
+ uint8_t commands[8];
+ PortioList port_list;
+};
+
+static const VMStateDescription vmstate_i82374 = {
+ .name = "i82374",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(commands, I82374State, 8),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static uint32_t i82374_read_isr(void *opaque, uint32_t nport)
+{
+ uint32_t val = 0;
+
+ BADF("%s: %08x\n", __func__, nport);
+
+ DPRINTF("%s: %08x=%08x\n", __func__, nport, val);
+ return val;
+}
+
+static void i82374_write_command(void *opaque, uint32_t nport, uint32_t data)
+{
+ DPRINTF("%s: %08x=%08x\n", __func__, nport, data);
+
+ if (data != 0x42) {
+ /* Not Stop S/G command */
+ BADF("%s: %08x=%08x\n", __func__, nport, data);
+ }
+}
+
+static uint32_t i82374_read_status(void *opaque, uint32_t nport)
+{
+ uint32_t val = 0;
+
+ BADF("%s: %08x\n", __func__, nport);
+
+ DPRINTF("%s: %08x=%08x\n", __func__, nport, val);
+ return val;
+}
+
+static void i82374_write_descriptor(void *opaque, uint32_t nport, uint32_t data)
+{
+ DPRINTF("%s: %08x=%08x\n", __func__, nport, data);
+
+ BADF("%s: %08x=%08x\n", __func__, nport, data);
+}
+
+static uint32_t i82374_read_descriptor(void *opaque, uint32_t nport)
+{
+ uint32_t val = 0;
+
+ BADF("%s: %08x\n", __func__, nport);
+
+ DPRINTF("%s: %08x=%08x\n", __func__, nport, val);
+ return val;
+}
+
+static const MemoryRegionPortio i82374_portio_list[] = {
+ { 0x0A, 1, 1, .read = i82374_read_isr, },
+ { 0x10, 8, 1, .write = i82374_write_command, },
+ { 0x18, 8, 1, .read = i82374_read_status, },
+ { 0x20, 0x20, 1,
+ .write = i82374_write_descriptor, .read = i82374_read_descriptor, },
+ PORTIO_END_OF_LIST(),
+};
+
+static void i82374_realize(DeviceState *dev, Error **errp)
+{
+ I82374State *s = I82374(dev);
+ ISABus *isa_bus = isa_bus_from_device(ISA_DEVICE(dev));
+
+ if (isa_get_dma(isa_bus, 0)) {
+ error_setg(errp, "DMA already initialized on ISA bus");
+ return;
+ }
+ i8257_dma_init(isa_bus, true);
+
+ portio_list_init(&s->port_list, OBJECT(s), i82374_portio_list, s,
+ "i82374");
+ portio_list_add(&s->port_list, isa_address_space_io(&s->parent_obj),
+ s->iobase);
+
+ memset(s->commands, 0, sizeof(s->commands));
+}
+
+static Property i82374_properties[] = {
+ DEFINE_PROP_UINT32("iobase", I82374State, iobase, 0x400),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void i82374_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = i82374_realize;
+ dc->vmsd = &vmstate_i82374;
+ device_class_set_props(dc, i82374_properties);
+}
+
+static const TypeInfo i82374_info = {
+ .name = TYPE_I82374,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(I82374State),
+ .class_init = i82374_class_init,
+};
+
+static void i82374_register_types(void)
+{
+ type_register_static(&i82374_info);
+}
+
+type_init(i82374_register_types)
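
Because the I/O base is a qdev property, a machine model can place the
controller somewhere other than the default 0x400. A hypothetical
instantiation sketch, mirroring the isa_new()/isa_realize_and_unref()
pattern that i8257_dma_init() uses further below (isa_bus stands for
whatever ISABus the board owns):

    ISADevice *dev = isa_new(TYPE_I82374);

    qdev_prop_set_uint32(DEVICE(dev), "iobase", 0x500);
    isa_realize_and_unref(dev, isa_bus, &error_fatal);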
diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c
new file mode 100644
index 000000000..de5f69691
--- /dev/null
+++ b/hw/dma/i8257.c
@@ -0,0 +1,657 @@
+/*
+ * QEMU DMA emulation
+ *
+ * Copyright (c) 2003-2004 Vassili Karpov (malc)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/isa/isa.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "hw/dma/i8257.h"
+#include "qapi/error.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/log.h"
+#include "trace.h"
+
+
+/* #define DEBUG_DMA */
+
+#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
+#ifdef DEBUG_DMA
+#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
+#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
+#else
+#define linfo(...)
+#define ldebug(...)
+#endif
+
+#define ADDR 0
+#define COUNT 1
+
+enum {
+ CMD_MEMORY_TO_MEMORY = 0x01,
+ CMD_FIXED_ADDRESS = 0x02,
+ CMD_BLOCK_CONTROLLER = 0x04,
+ CMD_COMPRESSED_TIME = 0x08,
+ CMD_CYCLIC_PRIORITY = 0x10,
+ CMD_EXTENDED_WRITE = 0x20,
+ CMD_LOW_DREQ = 0x40,
+ CMD_LOW_DACK = 0x80,
+ CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
+ | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
+ | CMD_LOW_DREQ | CMD_LOW_DACK
+
+};
+
+static void i8257_dma_run(void *opaque);
+
+static const int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
+
+static void i8257_write_page(void *opaque, uint32_t nport, uint32_t data)
+{
+ I8257State *d = opaque;
+ int ichan;
+
+ ichan = channels[nport & 7];
+ if (-1 == ichan) {
+ dolog ("invalid channel %#x %#x\n", nport, data);
+ return;
+ }
+ d->regs[ichan].page = data;
+}
+
+static void i8257_write_pageh(void *opaque, uint32_t nport, uint32_t data)
+{
+ I8257State *d = opaque;
+ int ichan;
+
+ ichan = channels[nport & 7];
+ if (-1 == ichan) {
+ dolog ("invalid channel %#x %#x\n", nport, data);
+ return;
+ }
+ d->regs[ichan].pageh = data;
+}
+
+static uint32_t i8257_read_page(void *opaque, uint32_t nport)
+{
+ I8257State *d = opaque;
+ int ichan;
+
+ ichan = channels[nport & 7];
+ if (-1 == ichan) {
+ dolog ("invalid channel read %#x\n", nport);
+ return 0;
+ }
+ return d->regs[ichan].page;
+}
+
+static uint32_t i8257_read_pageh(void *opaque, uint32_t nport)
+{
+ I8257State *d = opaque;
+ int ichan;
+
+ ichan = channels[nport & 7];
+ if (-1 == ichan) {
+ dolog ("invalid channel read %#x\n", nport);
+ return 0;
+ }
+ return d->regs[ichan].pageh;
+}
+
+static inline void i8257_init_chan(I8257State *d, int ichan)
+{
+ I8257Regs *r;
+
+ r = d->regs + ichan;
+ r->now[ADDR] = r->base[ADDR] << d->dshift;
+ r->now[COUNT] = 0;
+}
+
+static inline int i8257_getff(I8257State *d)
+{
+ int ff;
+
+ ff = d->flip_flop;
+ d->flip_flop = !ff;
+ return ff;
+}
+
+static uint64_t i8257_read_chan(void *opaque, hwaddr nport, unsigned size)
+{
+ I8257State *d = opaque;
+ int ichan, nreg, iport, ff, val, dir;
+ I8257Regs *r;
+
+ iport = (nport >> d->dshift) & 0x0f;
+ ichan = iport >> 1;
+ nreg = iport & 1;
+ r = d->regs + ichan;
+
+ dir = ((r->mode >> 5) & 1) ? -1 : 1;
+ ff = i8257_getff(d);
+ if (nreg)
+ val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
+ else
+ val = r->now[ADDR] + r->now[COUNT] * dir;
+
+ ldebug ("read_chan %#x -> %d\n", iport, val);
+ return (val >> (d->dshift + (ff << 3))) & 0xff;
+}
+
+static void i8257_write_chan(void *opaque, hwaddr nport, uint64_t data,
+ unsigned int size)
+{
+ I8257State *d = opaque;
+ int iport, ichan, nreg;
+ I8257Regs *r;
+
+ iport = (nport >> d->dshift) & 0x0f;
+ ichan = iport >> 1;
+ nreg = iport & 1;
+ r = d->regs + ichan;
+ if (i8257_getff(d)) {
+ r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
+ i8257_init_chan(d, ichan);
+ } else {
+ r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
+ }
+}
+
+static void i8257_write_cont(void *opaque, hwaddr nport, uint64_t data,
+ unsigned int size)
+{
+ I8257State *d = opaque;
+ int iport, ichan = 0;
+
+ iport = (nport >> d->dshift) & 0x0f;
+ switch (iport) {
+ case 0x00: /* command */
+ if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
+ qemu_log_mask(LOG_UNIMP, "%s: cmd 0x%02"PRIx64" not supported\n",
+ __func__, data);
+ return;
+ }
+ d->command = data;
+ break;
+
+ case 0x01:
+ ichan = data & 3;
+ if (data & 4) {
+ d->status |= 1 << (ichan + 4);
+ }
+ else {
+ d->status &= ~(1 << (ichan + 4));
+ }
+ d->status &= ~(1 << ichan);
+ i8257_dma_run(d);
+ break;
+
+ case 0x02: /* single mask */
+ if (data & 4)
+ d->mask |= 1 << (data & 3);
+ else
+ d->mask &= ~(1 << (data & 3));
+ i8257_dma_run(d);
+ break;
+
+ case 0x03: /* mode */
+ {
+ ichan = data & 3;
+#ifdef DEBUG_DMA
+ {
+ int op, ai, dir, opmode;
+ op = (data >> 2) & 3;
+ ai = (data >> 4) & 1;
+ dir = (data >> 5) & 1;
+ opmode = (data >> 6) & 3;
+
+ linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
+ ichan, op, ai, dir, opmode);
+ }
+#endif
+ d->regs[ichan].mode = data;
+ break;
+ }
+
+ case 0x04: /* clear flip flop */
+ d->flip_flop = 0;
+ break;
+
+ case 0x05: /* reset */
+ d->flip_flop = 0;
+ d->mask = ~0;
+ d->status = 0;
+ d->command = 0;
+ break;
+
+ case 0x06: /* clear mask for all channels */
+ d->mask = 0;
+ i8257_dma_run(d);
+ break;
+
+ case 0x07: /* write mask for all channels */
+ d->mask = data;
+ i8257_dma_run(d);
+ break;
+
+ default:
+ dolog ("unknown iport %#x\n", iport);
+ break;
+ }
+
+#ifdef DEBUG_DMA
+ if (0xc != iport) {
+ linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
+ nport, ichan, data);
+ }
+#endif
+}
+
+static uint64_t i8257_read_cont(void *opaque, hwaddr nport, unsigned size)
+{
+ I8257State *d = opaque;
+ int iport, val;
+
+ iport = (nport >> d->dshift) & 0x0f;
+ switch (iport) {
+ case 0x00: /* status */
+ val = d->status;
+ d->status &= 0xf0;
+ break;
+ case 0x01: /* mask */
+ val = d->mask;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
+ return val;
+}
+
+static bool i8257_dma_has_autoinitialization(IsaDma *obj, int nchan)
+{
+ I8257State *d = I8257(obj);
+ return (d->regs[nchan & 3].mode >> 4) & 1;
+}
+
+static void i8257_dma_hold_DREQ(IsaDma *obj, int nchan)
+{
+ I8257State *d = I8257(obj);
+ int ichan;
+
+ ichan = nchan & 3;
+ d->status |= 1 << (ichan + 4);
+ i8257_dma_run(d);
+}
+
+static void i8257_dma_release_DREQ(IsaDma *obj, int nchan)
+{
+ I8257State *d = I8257(obj);
+ int ichan;
+
+ ichan = nchan & 3;
+ d->status &= ~(1 << (ichan + 4));
+ i8257_dma_run(d);
+}
+
+static void i8257_channel_run(I8257State *d, int ichan)
+{
+ int ncont = d->dshift;
+ int n;
+ I8257Regs *r = &d->regs[ichan];
+#ifdef DEBUG_DMA
+ int dir, opmode;
+
+ dir = (r->mode >> 5) & 1;
+ opmode = (r->mode >> 6) & 3;
+
+ if (dir) {
+ dolog ("DMA in address decrement mode\n");
+ }
+ if (opmode != 1) {
+ dolog ("DMA not in single mode select %#x\n", opmode);
+ }
+#endif
+
+ n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
+ r->now[COUNT], (r->base[COUNT] + 1) << ncont);
+ r->now[COUNT] = n;
+ ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
+ if (n == (r->base[COUNT] + 1) << ncont) {
+ ldebug("transfer done\n");
+ d->status |= (1 << ichan);
+ }
+}
+
+static void i8257_dma_run(void *opaque)
+{
+ I8257State *d = opaque;
+ int ichan;
+ int rearm = 0;
+
+ if (d->running) {
+ rearm = 1;
+ goto out;
+ } else {
+ d->running = 1;
+ }
+
+ for (ichan = 0; ichan < 4; ichan++) {
+ int mask;
+
+ mask = 1 << ichan;
+
+ if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
+ i8257_channel_run(d, ichan);
+ rearm = 1;
+ }
+ }
+
+ d->running = 0;
+out:
+ if (rearm) {
+ qemu_bh_schedule_idle(d->dma_bh);
+ d->dma_bh_scheduled = true;
+ }
+}
+
+static void i8257_dma_register_channel(IsaDma *obj, int nchan,
+ IsaDmaTransferHandler transfer_handler,
+ void *opaque)
+{
+ I8257State *d = I8257(obj);
+ I8257Regs *r;
+ int ichan;
+
+ ichan = nchan & 3;
+
+ r = d->regs + ichan;
+ r->transfer_handler = transfer_handler;
+ r->opaque = opaque;
+}
+
+static bool i8257_is_verify_transfer(I8257Regs *r)
+{
+ return (r->mode & 0x0c) == 0;
+}
+
+static int i8257_dma_read_memory(IsaDma *obj, int nchan, void *buf, int pos,
+ int len)
+{
+ I8257State *d = I8257(obj);
+ I8257Regs *r = &d->regs[nchan & 3];
+ hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
+
+ if (i8257_is_verify_transfer(r)) {
+ return len;
+ }
+
+ if (r->mode & 0x20) {
+ int i;
+ uint8_t *p = buf;
+
+ cpu_physical_memory_read (addr - pos - len, buf, len);
+ /* What about 16bit transfers? */
+ for (i = 0; i < len >> 1; i++) {
+ uint8_t b = p[len - i - 1];
+ p[i] = b;
+ }
+ }
+ else
+ cpu_physical_memory_read (addr + pos, buf, len);
+
+ return len;
+}
+
+static int i8257_dma_write_memory(IsaDma *obj, int nchan, void *buf, int pos,
+ int len)
+{
+ I8257State *s = I8257(obj);
+ I8257Regs *r = &s->regs[nchan & 3];
+ hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
+
+ if (i8257_is_verify_transfer(r)) {
+ return len;
+ }
+
+ if (r->mode & 0x20) {
+ int i;
+ uint8_t *p = buf;
+
+ cpu_physical_memory_write (addr - pos - len, buf, len);
+ /* What about 16bit transfers? */
+ for (i = 0; i < len; i++) {
+ uint8_t b = p[len - i - 1];
+ p[i] = b;
+ }
+ }
+ else
+ cpu_physical_memory_write (addr + pos, buf, len);
+
+ return len;
+}
+
+/* request the emulator to transfer a new DMA memory block ASAP (even
+ * if the idle bottom half would not have exited the iothread yet).
+ */
+static void i8257_dma_schedule(IsaDma *obj)
+{
+ I8257State *d = I8257(obj);
+ if (d->dma_bh_scheduled) {
+ qemu_notify_event();
+ }
+}
+
+static void i8257_reset(DeviceState *dev)
+{
+ I8257State *d = I8257(dev);
+ i8257_write_cont(d, (0x05 << d->dshift), 0, 1);
+}
+
+static int i8257_phony_handler(void *opaque, int nchan, int dma_pos,
+ int dma_len)
+{
+ trace_i8257_unregistered_dma(nchan, dma_pos, dma_len);
+ return dma_pos;
+}
+
+
+static const MemoryRegionOps channel_io_ops = {
+ .read = i8257_read_chan,
+ .write = i8257_write_chan,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+/* IOport from page_base */
+static const MemoryRegionPortio page_portio_list[] = {
+ { 0x01, 3, 1, .write = i8257_write_page, .read = i8257_read_page, },
+ { 0x07, 1, 1, .write = i8257_write_page, .read = i8257_read_page, },
+ PORTIO_END_OF_LIST(),
+};
+
+/* IOport from pageh_base */
+static const MemoryRegionPortio pageh_portio_list[] = {
+ { 0x01, 3, 1, .write = i8257_write_pageh, .read = i8257_read_pageh, },
+ { 0x07, 3, 1, .write = i8257_write_pageh, .read = i8257_read_pageh, },
+ PORTIO_END_OF_LIST(),
+};
+
+static const MemoryRegionOps cont_io_ops = {
+ .read = i8257_read_cont,
+ .write = i8257_write_cont,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static const VMStateDescription vmstate_i8257_regs = {
+ .name = "dma_regs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_ARRAY(now, I8257Regs, 2),
+ VMSTATE_UINT16_ARRAY(base, I8257Regs, 2),
+ VMSTATE_UINT8(mode, I8257Regs),
+ VMSTATE_UINT8(page, I8257Regs),
+ VMSTATE_UINT8(pageh, I8257Regs),
+ VMSTATE_UINT8(dack, I8257Regs),
+ VMSTATE_UINT8(eop, I8257Regs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int i8257_post_load(void *opaque, int version_id)
+{
+ I8257State *d = opaque;
+ i8257_dma_run(d);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_i8257 = {
+ .name = "dma",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = i8257_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(command, I8257State),
+ VMSTATE_UINT8(mask, I8257State),
+ VMSTATE_UINT8(flip_flop, I8257State),
+ VMSTATE_INT32(dshift, I8257State),
+ VMSTATE_STRUCT_ARRAY(regs, I8257State, 4, 1, vmstate_i8257_regs,
+ I8257Regs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void i8257_realize(DeviceState *dev, Error **errp)
+{
+ ISADevice *isa = ISA_DEVICE(dev);
+ I8257State *d = I8257(dev);
+ int i;
+
+ memory_region_init_io(&d->channel_io, OBJECT(dev), &channel_io_ops, d,
+ "dma-chan", 8 << d->dshift);
+ memory_region_add_subregion(isa_address_space_io(isa),
+ d->base, &d->channel_io);
+
+ isa_register_portio_list(isa, &d->portio_page,
+ d->page_base, page_portio_list, d,
+ "dma-page");
+ if (d->pageh_base >= 0) {
+ isa_register_portio_list(isa, &d->portio_pageh,
+ d->pageh_base, pageh_portio_list, d,
+ "dma-pageh");
+ }
+
+ memory_region_init_io(&d->cont_io, OBJECT(isa), &cont_io_ops, d,
+ "dma-cont", 8 << d->dshift);
+ memory_region_add_subregion(isa_address_space_io(isa),
+ d->base + (8 << d->dshift), &d->cont_io);
+
+ for (i = 0; i < ARRAY_SIZE(d->regs); ++i) {
+ d->regs[i].transfer_handler = i8257_phony_handler;
+ }
+
+ d->dma_bh = qemu_bh_new(i8257_dma_run, d);
+}
+
+static Property i8257_properties[] = {
+ DEFINE_PROP_INT32("base", I8257State, base, 0x00),
+ DEFINE_PROP_INT32("page-base", I8257State, page_base, 0x80),
+ DEFINE_PROP_INT32("pageh-base", I8257State, pageh_base, 0x480),
+ DEFINE_PROP_INT32("dshift", I8257State, dshift, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void i8257_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ IsaDmaClass *idc = ISADMA_CLASS(klass);
+
+ dc->realize = i8257_realize;
+ dc->reset = i8257_reset;
+ dc->vmsd = &vmstate_i8257;
+ device_class_set_props(dc, i8257_properties);
+
+ idc->has_autoinitialization = i8257_dma_has_autoinitialization;
+ idc->read_memory = i8257_dma_read_memory;
+ idc->write_memory = i8257_dma_write_memory;
+ idc->hold_DREQ = i8257_dma_hold_DREQ;
+ idc->release_DREQ = i8257_dma_release_DREQ;
+ idc->schedule = i8257_dma_schedule;
+ idc->register_channel = i8257_dma_register_channel;
+ /* Reason: needs to be wired up by isa_bus_dma() to work */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo i8257_info = {
+ .name = TYPE_I8257,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(I8257State),
+ .class_init = i8257_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_ISADMA },
+ { }
+ }
+};
+
+static void i8257_register_types(void)
+{
+ type_register_static(&i8257_info);
+}
+
+type_init(i8257_register_types)
+
+void i8257_dma_init(ISABus *bus, bool high_page_enable)
+{
+ ISADevice *isa1, *isa2;
+ DeviceState *d;
+
+ isa1 = isa_new(TYPE_I8257);
+ d = DEVICE(isa1);
+ qdev_prop_set_int32(d, "base", 0x00);
+ qdev_prop_set_int32(d, "page-base", 0x80);
+ qdev_prop_set_int32(d, "pageh-base", high_page_enable ? 0x480 : -1);
+ qdev_prop_set_int32(d, "dshift", 0);
+ isa_realize_and_unref(isa1, bus, &error_fatal);
+
+ isa2 = isa_new(TYPE_I8257);
+ d = DEVICE(isa2);
+ qdev_prop_set_int32(d, "base", 0xc0);
+ qdev_prop_set_int32(d, "page-base", 0x88);
+ qdev_prop_set_int32(d, "pageh-base", high_page_enable ? 0x488 : -1);
+ qdev_prop_set_int32(d, "dshift", 1);
+ isa_realize_and_unref(isa2, bus, &error_fatal);
+
+ isa_bus_dma(bus, ISADMA(isa1), ISADMA(isa2));
+}
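
One detail worth a worked example: the physical address used by
i8257_dma_read_memory()/i8257_dma_write_memory() is stitched together from
three narrow registers, the 7 usable bits of pageh, the 8-bit page and the
16-bit current address. A standalone sketch of the same computation for the
8-bit controller (dshift = 0):

    #include <stdint.h>

    /* Mirrors ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
     * e.g. pageh=0x01, page=0x23, cur=0x4567 yields address 0x01234567. */
    static inline uint32_t i8257_phys_addr(uint8_t pageh, uint8_t page,
                                           uint16_t cur)
    {
        return ((uint32_t)(pageh & 0x7f) << 24) |
               ((uint32_t)page << 16) |
               cur;
    }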
diff --git a/hw/dma/meson.build b/hw/dma/meson.build
new file mode 100644
index 000000000..f3f0661bc
--- /dev/null
+++ b/hw/dma/meson.build
@@ -0,0 +1,16 @@
+softmmu_ss.add(when: 'CONFIG_RC4030', if_true: files('rc4030.c'))
+softmmu_ss.add(when: 'CONFIG_PL080', if_true: files('pl080.c'))
+softmmu_ss.add(when: 'CONFIG_PL330', if_true: files('pl330.c'))
+softmmu_ss.add(when: 'CONFIG_I82374', if_true: files('i82374.c'))
+softmmu_ss.add(when: 'CONFIG_I8257', if_true: files('i8257.c'))
+softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axidma.c'))
+softmmu_ss.add(when: 'CONFIG_ZYNQ_DEVCFG', if_true: files('xlnx-zynq-devcfg.c'))
+softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_dma.c'))
+softmmu_ss.add(when: 'CONFIG_STP2000', if_true: files('sparc32_dma.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dpdma.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_ZDMA', if_true: files('xlnx-zdma.c'))
+softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c'))
+softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c'))
+softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c'))
+softmmu_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c'))
diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c
new file mode 100644
index 000000000..6677237d4
--- /dev/null
+++ b/hw/dma/omap_dma.c
@@ -0,0 +1,2124 @@
+/*
+ * TI OMAP DMA gigacell.
+ *
+ * Copyright (C) 2006-2008 Andrzej Zaborowski <balrog@zabor.org>
+ * Copyright (C) 2007-2008 Lauro Ramos Venancio <lauro.venancio@indt.org.br>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/timer.h"
+#include "hw/arm/omap.h"
+#include "hw/irq.h"
+#include "hw/arm/soc_dma.h"
+
+struct omap_dma_channel_s {
+ /* transfer data */
+ int burst[2];
+ int pack[2];
+ int endian[2];
+ int endian_lock[2];
+ int translate[2];
+ enum omap_dma_port port[2];
+ hwaddr addr[2];
+ omap_dma_addressing_t mode[2];
+ uint32_t elements;
+ uint16_t frames;
+ int32_t frame_index[2];
+ int16_t element_index[2];
+ int data_type;
+
+ /* transfer type */
+ int transparent_copy;
+ int constant_fill;
+ uint32_t color;
+ int prefetch;
+
+ /* auto init and linked channel data */
+ int end_prog;
+ int repeat;
+ int auto_init;
+ int link_enabled;
+ int link_next_ch;
+
+ /* interruption data */
+ int interrupts;
+ int status;
+ int cstatus;
+
+ /* state data */
+ int active;
+ int enable;
+ int sync;
+ int src_sync;
+ int pending_request;
+ int waiting_end_prog;
+ uint16_t cpc;
+ int set_update;
+
+ /* sync type */
+ int fs;
+ int bs;
+
+ /* compatibility */
+ int omap_3_1_compatible_disable;
+
+ qemu_irq irq;
+ struct omap_dma_channel_s *sibling;
+
+ struct omap_dma_reg_set_s {
+ hwaddr src, dest;
+ int frame;
+ int element;
+ int pck_element;
+ int frame_delta[2];
+ int elem_delta[2];
+ int frames;
+ int elements;
+ int pck_elements;
+ } active_set;
+
+ struct soc_dma_ch_s *dma;
+
+ /* unused parameters */
+ int write_mode;
+ int priority;
+ int interleave_disabled;
+ int type;
+ int suspend;
+ int buf_disable;
+};
+
+struct omap_dma_s {
+ struct soc_dma_s *dma;
+ MemoryRegion iomem;
+
+ struct omap_mpu_state_s *mpu;
+ omap_clk clk;
+ qemu_irq irq[4];
+ void (*intr_update)(struct omap_dma_s *s);
+ enum omap_dma_model model;
+ int omap_3_1_mapping_disabled;
+
+ uint32_t gcr;
+ uint32_t ocp;
+ uint32_t caps[5];
+ uint32_t irqen[4];
+ uint32_t irqstat[4];
+
+ int chans;
+ struct omap_dma_channel_s ch[32];
+ struct omap_dma_lcd_channel_s lcd_ch;
+};
+
+/* Interrupts */
+#define TIMEOUT_INTR (1 << 0)
+#define EVENT_DROP_INTR (1 << 1)
+#define HALF_FRAME_INTR (1 << 2)
+#define END_FRAME_INTR (1 << 3)
+#define LAST_FRAME_INTR (1 << 4)
+#define END_BLOCK_INTR (1 << 5)
+#define SYNC (1 << 6)
+#define END_PKT_INTR (1 << 7)
+#define TRANS_ERR_INTR (1 << 8)
+#define MISALIGN_INTR (1 << 11)
+
+static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
+{
+ s->intr_update(s);
+}
+
+static void omap_dma_channel_load(struct omap_dma_channel_s *ch)
+{
+ struct omap_dma_reg_set_s *a = &ch->active_set;
+ int i, normal;
+ int omap_3_1 = !ch->omap_3_1_compatible_disable;
+
+ /*
+ * TODO: verify address ranges and alignment
+ * TODO: port endianness
+ */
+
+ a->src = ch->addr[0];
+ a->dest = ch->addr[1];
+ a->frames = ch->frames;
+ a->elements = ch->elements;
+ a->pck_elements = ch->frame_index[!ch->src_sync];
+ a->frame = 0;
+ a->element = 0;
+ a->pck_element = 0;
+
+ if (unlikely(!ch->elements || !ch->frames)) {
+ printf("%s: bad DMA request\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < 2; i ++)
+ switch (ch->mode[i]) {
+ case constant:
+ a->elem_delta[i] = 0;
+ a->frame_delta[i] = 0;
+ break;
+ case post_incremented:
+ a->elem_delta[i] = ch->data_type;
+ a->frame_delta[i] = 0;
+ break;
+ case single_index:
+ a->elem_delta[i] = ch->data_type +
+ ch->element_index[omap_3_1 ? 0 : i] - 1;
+ a->frame_delta[i] = 0;
+ break;
+ case double_index:
+ a->elem_delta[i] = ch->data_type +
+ ch->element_index[omap_3_1 ? 0 : i] - 1;
+ a->frame_delta[i] = ch->frame_index[omap_3_1 ? 0 : i] -
+ ch->element_index[omap_3_1 ? 0 : i];
+ break;
+ default:
+ break;
+ }
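+    /*
+     * Worked example of the deltas above: a 16-bit transfer
+     * (data_type == 2) in double_index mode with an element index of 3
+     * and a frame index of 10 advances by elem_delta = 2 + 3 - 1 = 4
+     * between elements and adds frame_delta = 10 - 3 = 7 extra at each
+     * frame boundary.
+     */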
+
+ normal = !ch->transparent_copy && !ch->constant_fill &&
+    /* FIFO is big-endian so either (ch->endian[n] == 1) OR
+     * (ch->endian_lock[n] == 1) means no endianness conversion. */
+ (ch->endian[0] | ch->endian_lock[0]) ==
+ (ch->endian[1] | ch->endian_lock[1]);
+ for (i = 0; i < 2; i ++) {
+ /* TODO: for a->frame_delta[i] > 0 still use the fast path, just
+ * limit min_elems in omap_dma_transfer_setup to the nearest frame
+ * end. */
+ if (!a->elem_delta[i] && normal &&
+ (a->frames == 1 || !a->frame_delta[i]))
+ ch->dma->type[i] = soc_dma_access_const;
+ else if (a->elem_delta[i] == ch->data_type && normal &&
+ (a->frames == 1 || !a->frame_delta[i]))
+ ch->dma->type[i] = soc_dma_access_linear;
+ else
+ ch->dma->type[i] = soc_dma_access_other;
+
+ ch->dma->vaddr[i] = ch->addr[i];
+ }
+ soc_dma_ch_update(ch->dma);
+}
+
+static void omap_dma_activate_channel(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch)
+{
+ if (!ch->active) {
+ if (ch->set_update) {
+ /* It's not clear when the active set is supposed to be
+ * loaded from registers. We're already loading it when the
+ * channel is enabled, and for some guests this is not enough
+             * but that may also be due to a race condition (no
+ * delays in qemu) in the guest code, which we're just
+ * working around here. */
+ omap_dma_channel_load(ch);
+ ch->set_update = 0;
+ }
+
+ ch->active = 1;
+ soc_dma_set_request(ch->dma, 1);
+ if (ch->sync)
+ ch->status |= SYNC;
+ }
+}
+
+static void omap_dma_deactivate_channel(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch)
+{
+ /* Update cpc */
+ ch->cpc = ch->active_set.dest & 0xffff;
+
+ if (ch->pending_request && !ch->waiting_end_prog && ch->enable) {
+ /* Don't deactivate the channel */
+ ch->pending_request = 0;
+ return;
+ }
+
+    /* Don't deactivate the channel if it is synchronized and the DMA request is
+ active */
+ if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync)))
+ return;
+
+ if (ch->active) {
+ ch->active = 0;
+ ch->status &= ~SYNC;
+ soc_dma_set_request(ch->dma, 0);
+ }
+}
+
+static void omap_dma_enable_channel(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch)
+{
+ if (!ch->enable) {
+ ch->enable = 1;
+ ch->waiting_end_prog = 0;
+ omap_dma_channel_load(ch);
+ /* TODO: theoretically if ch->sync && ch->prefetch &&
+ * !s->dma->drqbmp[ch->sync], we should also activate and fetch
+ * from source and then stall until signalled. */
+ if ((!ch->sync) || (s->dma->drqbmp & (1ULL << ch->sync))) {
+ omap_dma_activate_channel(s, ch);
+ }
+ }
+}
+
+static void omap_dma_disable_channel(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch)
+{
+ if (ch->enable) {
+ ch->enable = 0;
+ /* Discard any pending request */
+ ch->pending_request = 0;
+ omap_dma_deactivate_channel(s, ch);
+ }
+}
+
+static void omap_dma_channel_end_prog(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch)
+{
+ if (ch->waiting_end_prog) {
+ ch->waiting_end_prog = 0;
+ if (!ch->sync || ch->pending_request) {
+ ch->pending_request = 0;
+ omap_dma_activate_channel(s, ch);
+ }
+ }
+}
+
+static void omap_dma_interrupts_3_1_update(struct omap_dma_s *s)
+{
+ struct omap_dma_channel_s *ch = s->ch;
+
+ /* First three interrupts are shared between two channels each. */
+ if (ch[0].status | ch[6].status)
+ qemu_irq_raise(ch[0].irq);
+ if (ch[1].status | ch[7].status)
+ qemu_irq_raise(ch[1].irq);
+ if (ch[2].status | ch[8].status)
+ qemu_irq_raise(ch[2].irq);
+ if (ch[3].status)
+ qemu_irq_raise(ch[3].irq);
+ if (ch[4].status)
+ qemu_irq_raise(ch[4].irq);
+ if (ch[5].status)
+ qemu_irq_raise(ch[5].irq);
+}
+
+static void omap_dma_interrupts_3_2_update(struct omap_dma_s *s)
+{
+ struct omap_dma_channel_s *ch = s->ch;
+ int i;
+
+ for (i = s->chans; i; ch ++, i --)
+ if (ch->status)
+ qemu_irq_raise(ch->irq);
+}
+
+static void omap_dma_enable_3_1_mapping(struct omap_dma_s *s)
+{
+ s->omap_3_1_mapping_disabled = 0;
+ s->chans = 9;
+ s->intr_update = omap_dma_interrupts_3_1_update;
+}
+
+static void omap_dma_disable_3_1_mapping(struct omap_dma_s *s)
+{
+ s->omap_3_1_mapping_disabled = 1;
+ s->chans = 16;
+ s->intr_update = omap_dma_interrupts_3_2_update;
+}
+
+static void omap_dma_process_request(struct omap_dma_s *s, int request)
+{
+ int channel;
+ int drop_event = 0;
+ struct omap_dma_channel_s *ch = s->ch;
+
+ for (channel = 0; channel < s->chans; channel ++, ch ++) {
+ if (ch->enable && ch->sync == request) {
+ if (!ch->active)
+ omap_dma_activate_channel(s, ch);
+ else if (!ch->pending_request)
+ ch->pending_request = 1;
+ else {
+                /* Request collision: a second request arrived while the
+                   previous one was still being processed */
+ ch->status |= EVENT_DROP_INTR;
+ drop_event = 1;
+ }
+ }
+ }
+
+ if (drop_event)
+ omap_dma_interrupts_update(s);
+}
+
+static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)
+{
+ uint8_t value[4];
+ struct omap_dma_channel_s *ch = dma->opaque;
+ struct omap_dma_reg_set_s *a = &ch->active_set;
+ int bytes = dma->bytes;
+#ifdef MULTI_REQ
+ uint16_t status = ch->status;
+#endif
+
+ do {
+ /* Transfer a single element */
+ /* FIXME: check the endianness */
+ if (!ch->constant_fill)
+ cpu_physical_memory_read(a->src, value, ch->data_type);
+ else
+ *(uint32_t *) value = ch->color;
+
+ if (!ch->transparent_copy || *(uint32_t *) value != ch->color)
+ cpu_physical_memory_write(a->dest, value, ch->data_type);
+
+ a->src += a->elem_delta[0];
+ a->dest += a->elem_delta[1];
+ a->element ++;
+
+#ifndef MULTI_REQ
+ if (a->element == a->elements) {
+ /* End of Frame */
+ a->element = 0;
+ a->src += a->frame_delta[0];
+ a->dest += a->frame_delta[1];
+ a->frame ++;
+
+ /* If the channel is async, update cpc */
+ if (!ch->sync)
+ ch->cpc = a->dest & 0xffff;
+ }
+ } while ((bytes -= ch->data_type));
+#else
+ /* If the channel is element synchronized, deactivate it */
+ if (ch->sync && !ch->fs && !ch->bs)
+ omap_dma_deactivate_channel(s, ch);
+
+ /* If it is the last frame, set the LAST_FRAME interrupt */
+ if (a->element == 1 && a->frame == a->frames - 1)
+ if (ch->interrupts & LAST_FRAME_INTR)
+ ch->status |= LAST_FRAME_INTR;
+
+        /* If half of the frame was reached, set the HALF_FRAME
+ interrupt */
+ if (a->element == (a->elements >> 1))
+ if (ch->interrupts & HALF_FRAME_INTR)
+ ch->status |= HALF_FRAME_INTR;
+
+ if (ch->fs && ch->bs) {
+ a->pck_element ++;
+            /* Check if a full packet has been transferred. */
+ if (a->pck_element == a->pck_elements) {
+ a->pck_element = 0;
+
+ /* Set the END_PKT interrupt */
+ if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
+ ch->status |= END_PKT_INTR;
+
+ /* If the channel is packet-synchronized, deactivate it */
+ if (ch->sync)
+ omap_dma_deactivate_channel(s, ch);
+ }
+ }
+
+ if (a->element == a->elements) {
+ /* End of Frame */
+ a->element = 0;
+ a->src += a->frame_delta[0];
+ a->dest += a->frame_delta[1];
+ a->frame ++;
+
+ /* If the channel is frame synchronized, deactivate it */
+ if (ch->sync && ch->fs && !ch->bs)
+ omap_dma_deactivate_channel(s, ch);
+
+ /* If the channel is async, update cpc */
+ if (!ch->sync)
+ ch->cpc = a->dest & 0xffff;
+
+ /* Set the END_FRAME interrupt */
+ if (ch->interrupts & END_FRAME_INTR)
+ ch->status |= END_FRAME_INTR;
+
+ if (a->frame == a->frames) {
+ /* End of Block */
+ /* Disable the channel */
+
+ if (ch->omap_3_1_compatible_disable) {
+ omap_dma_disable_channel(s, ch);
+ if (ch->link_enabled)
+ omap_dma_enable_channel(s,
+ &s->ch[ch->link_next_ch]);
+ } else {
+ if (!ch->auto_init)
+ omap_dma_disable_channel(s, ch);
+ else if (ch->repeat || ch->end_prog)
+ omap_dma_channel_load(ch);
+ else {
+ ch->waiting_end_prog = 1;
+ omap_dma_deactivate_channel(s, ch);
+ }
+ }
+
+ if (ch->interrupts & END_BLOCK_INTR)
+ ch->status |= END_BLOCK_INTR;
+ }
+ }
+ } while (status == ch->status && ch->active);
+
+ omap_dma_interrupts_update(s);
+#endif
+}
+
+enum {
+ omap_dma_intr_element_sync,
+ omap_dma_intr_last_frame,
+ omap_dma_intr_half_frame,
+ omap_dma_intr_frame,
+ omap_dma_intr_frame_sync,
+ omap_dma_intr_packet,
+ omap_dma_intr_packet_sync,
+ omap_dma_intr_block,
+ __omap_dma_intr_last,
+};
+
+static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma)
+{
+ struct omap_dma_port_if_s *src_p, *dest_p;
+ struct omap_dma_reg_set_s *a;
+ struct omap_dma_channel_s *ch = dma->opaque;
+ struct omap_dma_s *s = dma->dma->opaque;
+ int frames, min_elems, elements[__omap_dma_intr_last];
+
+ a = &ch->active_set;
+
+ src_p = &s->mpu->port[ch->port[0]];
+ dest_p = &s->mpu->port[ch->port[1]];
+ if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) ||
+ (!dest_p->addr_valid(s->mpu, a->dest))) {
+#if 0
+ /* Bus time-out */
+ if (ch->interrupts & TIMEOUT_INTR)
+ ch->status |= TIMEOUT_INTR;
+ omap_dma_deactivate_channel(s, ch);
+ continue;
+#endif
+ printf("%s: Bus time-out in DMA%i operation\n",
+ __func__, dma->num);
+ }
+
+ min_elems = INT_MAX;
+
+ /* Check all the conditions that terminate the transfer starting
+ * with those that can occur the soonest. */
+#define INTR_CHECK(cond, id, nelements) \
+ if (cond) { \
+ elements[id] = nelements; \
+ if (elements[id] < min_elems) \
+ min_elems = elements[id]; \
+ } else \
+ elements[id] = INT_MAX;
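+/*
+ * Example: on a frame-synchronised channel with END_FRAME_INTR enabled,
+ * a->elements == 8 and a->element == 5, the omap_dma_intr_frame check
+ * contributes 8 - 5 = 3 elements, capping min_elems so that this
+ * transfer stops at the frame boundary.
+ */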
+
+ /* Elements */
+ INTR_CHECK(
+ ch->sync && !ch->fs && !ch->bs,
+ omap_dma_intr_element_sync,
+ 1)
+
+ /* Frames */
+ /* TODO: for transfers where entire frames can be read and written
+ * using memcpy() but a->frame_delta is non-zero, try to still do
+ * transfers using soc_dma but limit min_elems to a->elements - ...
+ * See also the TODO in omap_dma_channel_load. */
+ INTR_CHECK(
+ (ch->interrupts & LAST_FRAME_INTR) &&
+ ((a->frame < a->frames - 1) || !a->element),
+ omap_dma_intr_last_frame,
+ (a->frames - a->frame - 2) * a->elements +
+ (a->elements - a->element + 1))
+ INTR_CHECK(
+ ch->interrupts & HALF_FRAME_INTR,
+ omap_dma_intr_half_frame,
+ (a->elements >> 1) +
+ (a->element >= (a->elements >> 1) ? a->elements : 0) -
+ a->element)
+ INTR_CHECK(
+ ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR),
+ omap_dma_intr_frame,
+ a->elements - a->element)
+ INTR_CHECK(
+ ch->sync && ch->fs && !ch->bs,
+ omap_dma_intr_frame_sync,
+ a->elements - a->element)
+
+ /* Packets */
+ INTR_CHECK(
+ ch->fs && ch->bs &&
+ (ch->interrupts & END_PKT_INTR) && !ch->src_sync,
+ omap_dma_intr_packet,
+ a->pck_elements - a->pck_element)
+ INTR_CHECK(
+ ch->fs && ch->bs && ch->sync,
+ omap_dma_intr_packet_sync,
+ a->pck_elements - a->pck_element)
+
+ /* Blocks */
+ INTR_CHECK(
+ 1,
+ omap_dma_intr_block,
+ (a->frames - a->frame - 1) * a->elements +
+ (a->elements - a->element))
+
+ dma->bytes = min_elems * ch->data_type;
+
+ /* Set appropriate interrupts and/or deactivate channels */
+
+#ifdef MULTI_REQ
+ /* TODO: should all of this only be done if dma->update, and otherwise
+ * inside omap_dma_transfer_generic below - check what's faster. */
+ if (dma->update) {
+#endif
+
+ /* If the channel is element synchronized, deactivate it */
+ if (min_elems == elements[omap_dma_intr_element_sync])
+ omap_dma_deactivate_channel(s, ch);
+
+ /* If it is the last frame, set the LAST_FRAME interrupt */
+ if (min_elems == elements[omap_dma_intr_last_frame])
+ ch->status |= LAST_FRAME_INTR;
+
+ /* If exactly half of the frame was reached, set the HALF_FRAME
+ interrupt */
+ if (min_elems == elements[omap_dma_intr_half_frame])
+ ch->status |= HALF_FRAME_INTR;
+
+ /* If a full packet has been transferred, set the END_PKT interrupt */
+ if (min_elems == elements[omap_dma_intr_packet])
+ ch->status |= END_PKT_INTR;
+
+ /* If the channel is packet-synchronized, deactivate it */
+ if (min_elems == elements[omap_dma_intr_packet_sync])
+ omap_dma_deactivate_channel(s, ch);
+
+ /* If the channel is frame synchronized, deactivate it */
+ if (min_elems == elements[omap_dma_intr_frame_sync])
+ omap_dma_deactivate_channel(s, ch);
+
+ /* Set the END_FRAME interrupt */
+ if (min_elems == elements[omap_dma_intr_frame])
+ ch->status |= END_FRAME_INTR;
+
+ if (min_elems == elements[omap_dma_intr_block]) {
+ /* End of Block */
+ /* Disable the channel */
+
+ if (ch->omap_3_1_compatible_disable) {
+ omap_dma_disable_channel(s, ch);
+ if (ch->link_enabled)
+ omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]);
+ } else {
+ if (!ch->auto_init)
+ omap_dma_disable_channel(s, ch);
+ else if (ch->repeat || ch->end_prog)
+ omap_dma_channel_load(ch);
+ else {
+ ch->waiting_end_prog = 1;
+ omap_dma_deactivate_channel(s, ch);
+ }
+ }
+
+ if (ch->interrupts & END_BLOCK_INTR)
+ ch->status |= END_BLOCK_INTR;
+ }
+
+ /* Update packet number */
+ if (ch->fs && ch->bs) {
+ a->pck_element += min_elems;
+ a->pck_element %= a->pck_elements;
+ }
+
+ /* TODO: check if we really need to update anything here or perhaps we
+ * can skip part of this. */
+#ifndef MULTI_REQ
+ if (dma->update) {
+#endif
+ a->element += min_elems;
+
+ frames = a->element / a->elements;
+ a->element = a->element % a->elements;
+ a->frame += frames;
+ a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0];
+ a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1];
+
+ /* If the channel is async, update cpc */
+ if (!ch->sync && frames)
+ ch->cpc = a->dest & 0xffff;
+
+ /* TODO: if the destination port is IMIF or EMIFF, set the dirty
+ * bits on it. */
+#ifndef MULTI_REQ
+ }
+#else
+ }
+#endif
+
+ omap_dma_interrupts_update(s);
+}
+
+void omap_dma_reset(struct soc_dma_s *dma)
+{
+ int i;
+ struct omap_dma_s *s = dma->opaque;
+
+ soc_dma_reset(s->dma);
+ if (s->model < omap_dma_4)
+ s->gcr = 0x0004;
+ else
+ s->gcr = 0x00010010;
+ s->ocp = 0x00000000;
+ memset(&s->irqstat, 0, sizeof(s->irqstat));
+ memset(&s->irqen, 0, sizeof(s->irqen));
+ s->lcd_ch.src = emiff;
+ s->lcd_ch.condition = 0;
+ s->lcd_ch.interrupts = 0;
+ s->lcd_ch.dual = 0;
+ if (s->model < omap_dma_4)
+ omap_dma_enable_3_1_mapping(s);
+ for (i = 0; i < s->chans; i ++) {
+ s->ch[i].suspend = 0;
+ s->ch[i].prefetch = 0;
+ s->ch[i].buf_disable = 0;
+ s->ch[i].src_sync = 0;
+ memset(&s->ch[i].burst, 0, sizeof(s->ch[i].burst));
+ memset(&s->ch[i].port, 0, sizeof(s->ch[i].port));
+ memset(&s->ch[i].mode, 0, sizeof(s->ch[i].mode));
+ memset(&s->ch[i].frame_index, 0, sizeof(s->ch[i].frame_index));
+ memset(&s->ch[i].element_index, 0, sizeof(s->ch[i].element_index));
+ memset(&s->ch[i].endian, 0, sizeof(s->ch[i].endian));
+ memset(&s->ch[i].endian_lock, 0, sizeof(s->ch[i].endian_lock));
+ memset(&s->ch[i].translate, 0, sizeof(s->ch[i].translate));
+ s->ch[i].write_mode = 0;
+ s->ch[i].data_type = 0;
+ s->ch[i].transparent_copy = 0;
+ s->ch[i].constant_fill = 0;
+ s->ch[i].color = 0x00000000;
+ s->ch[i].end_prog = 0;
+ s->ch[i].repeat = 0;
+ s->ch[i].auto_init = 0;
+ s->ch[i].link_enabled = 0;
+ if (s->model < omap_dma_4)
+ s->ch[i].interrupts = 0x0003;
+ else
+ s->ch[i].interrupts = 0x0000;
+ s->ch[i].status = 0;
+ s->ch[i].cstatus = 0;
+ s->ch[i].active = 0;
+ s->ch[i].enable = 0;
+ s->ch[i].sync = 0;
+ s->ch[i].pending_request = 0;
+ s->ch[i].waiting_end_prog = 0;
+ s->ch[i].cpc = 0x0000;
+ s->ch[i].fs = 0;
+ s->ch[i].bs = 0;
+ s->ch[i].omap_3_1_compatible_disable = 0;
+ memset(&s->ch[i].active_set, 0, sizeof(s->ch[i].active_set));
+ s->ch[i].priority = 0;
+ s->ch[i].interleave_disabled = 0;
+ s->ch[i].type = 0;
+ }
+}
+
+static int omap_dma_ch_reg_read(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch, int reg, uint16_t *value)
+{
+ switch (reg) {
+ case 0x00: /* SYS_DMA_CSDP_CH0 */
+ *value = (ch->burst[1] << 14) |
+ (ch->pack[1] << 13) |
+ (ch->port[1] << 9) |
+ (ch->burst[0] << 7) |
+ (ch->pack[0] << 6) |
+ (ch->port[0] << 2) |
+ (ch->data_type >> 1);
+ break;
+
+ case 0x02: /* SYS_DMA_CCR_CH0 */
+ if (s->model <= omap_dma_3_1)
+ *value = 0 << 10; /* FIFO_FLUSH reads as 0 */
+ else
+ *value = ch->omap_3_1_compatible_disable << 10;
+ *value |= (ch->mode[1] << 14) |
+ (ch->mode[0] << 12) |
+ (ch->end_prog << 11) |
+ (ch->repeat << 9) |
+ (ch->auto_init << 8) |
+ (ch->enable << 7) |
+ (ch->priority << 6) |
+ (ch->fs << 5) | ch->sync;
+ break;
+
+ case 0x04: /* SYS_DMA_CICR_CH0 */
+ *value = ch->interrupts;
+ break;
+
+ case 0x06: /* SYS_DMA_CSR_CH0 */
+ *value = ch->status;
+ ch->status &= SYNC;
+ if (!ch->omap_3_1_compatible_disable && ch->sibling) {
+ *value |= (ch->sibling->status & 0x3f) << 6;
+ ch->sibling->status &= SYNC;
+ }
+ qemu_irq_lower(ch->irq);
+ break;
+
+ case 0x08: /* SYS_DMA_CSSA_L_CH0 */
+ *value = ch->addr[0] & 0x0000ffff;
+ break;
+
+ case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
+ *value = ch->addr[0] >> 16;
+ break;
+
+ case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
+ *value = ch->addr[1] & 0x0000ffff;
+ break;
+
+ case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
+ *value = ch->addr[1] >> 16;
+ break;
+
+ case 0x10: /* SYS_DMA_CEN_CH0 */
+ *value = ch->elements;
+ break;
+
+ case 0x12: /* SYS_DMA_CFN_CH0 */
+ *value = ch->frames;
+ break;
+
+ case 0x14: /* SYS_DMA_CFI_CH0 */
+ *value = ch->frame_index[0];
+ break;
+
+ case 0x16: /* SYS_DMA_CEI_CH0 */
+ *value = ch->element_index[0];
+ break;
+
+ case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
+ if (ch->omap_3_1_compatible_disable)
+ *value = ch->active_set.src & 0xffff; /* CSAC */
+ else
+ *value = ch->cpc;
+ break;
+
+ case 0x1a: /* DMA_CDAC */
+ *value = ch->active_set.dest & 0xffff; /* CDAC */
+ break;
+
+ case 0x1c: /* DMA_CDEI */
+ *value = ch->element_index[1];
+ break;
+
+ case 0x1e: /* DMA_CDFI */
+ *value = ch->frame_index[1];
+ break;
+
+ case 0x20: /* DMA_COLOR_L */
+ *value = ch->color & 0xffff;
+ break;
+
+ case 0x22: /* DMA_COLOR_U */
+ *value = ch->color >> 16;
+ break;
+
+ case 0x24: /* DMA_CCR2 */
+ *value = (ch->bs << 2) |
+ (ch->transparent_copy << 1) |
+ ch->constant_fill;
+ break;
+
+ case 0x28: /* DMA_CLNK_CTRL */
+ *value = (ch->link_enabled << 15) |
+ (ch->link_next_ch & 0xf);
+ break;
+
+ case 0x2a: /* DMA_LCH_CTRL */
+ *value = (ch->interleave_disabled << 15) |
+ ch->type;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_ch_reg_write(struct omap_dma_s *s,
+ struct omap_dma_channel_s *ch, int reg, uint16_t value)
+{
+ switch (reg) {
+ case 0x00: /* SYS_DMA_CSDP_CH0 */
+ ch->burst[1] = (value & 0xc000) >> 14;
+ ch->pack[1] = (value & 0x2000) >> 13;
+ ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9);
+ ch->burst[0] = (value & 0x0180) >> 7;
+ ch->pack[0] = (value & 0x0040) >> 6;
+ ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2);
+ if (ch->port[0] >= __omap_dma_port_last) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
+ __func__, ch->port[0]);
+ }
+ if (ch->port[1] >= __omap_dma_port_last) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
+ __func__, ch->port[1]);
+ }
+ ch->data_type = 1 << (value & 3);
+ if ((value & 3) == 3) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: bad data_type for DMA channel\n", __func__);
+ ch->data_type >>= 1;
+ }
+ break;
+
+ case 0x02: /* SYS_DMA_CCR_CH0 */
+ ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
+ ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
+ ch->end_prog = (value & 0x0800) >> 11;
+ if (s->model >= omap_dma_3_2)
+ ch->omap_3_1_compatible_disable = (value >> 10) & 0x1;
+ ch->repeat = (value & 0x0200) >> 9;
+ ch->auto_init = (value & 0x0100) >> 8;
+ ch->priority = (value & 0x0040) >> 6;
+ ch->fs = (value & 0x0020) >> 5;
+ ch->sync = value & 0x001f;
+
+ if (value & 0x0080)
+ omap_dma_enable_channel(s, ch);
+ else
+ omap_dma_disable_channel(s, ch);
+
+ if (ch->end_prog)
+ omap_dma_channel_end_prog(s, ch);
+
+ break;
+
+ case 0x04: /* SYS_DMA_CICR_CH0 */
+ ch->interrupts = value & 0x3f;
+ break;
+
+ case 0x06: /* SYS_DMA_CSR_CH0 */
+ OMAP_RO_REG((hwaddr) reg);
+ break;
+
+ case 0x08: /* SYS_DMA_CSSA_L_CH0 */
+ ch->addr[0] &= 0xffff0000;
+ ch->addr[0] |= value;
+ break;
+
+ case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
+ ch->addr[0] &= 0x0000ffff;
+ ch->addr[0] |= (uint32_t) value << 16;
+ break;
+
+ case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
+ ch->addr[1] &= 0xffff0000;
+ ch->addr[1] |= value;
+ break;
+
+ case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
+ ch->addr[1] &= 0x0000ffff;
+ ch->addr[1] |= (uint32_t) value << 16;
+ break;
+
+ case 0x10: /* SYS_DMA_CEN_CH0 */
+ ch->elements = value;
+ break;
+
+ case 0x12: /* SYS_DMA_CFN_CH0 */
+ ch->frames = value;
+ break;
+
+ case 0x14: /* SYS_DMA_CFI_CH0 */
+ ch->frame_index[0] = (int16_t) value;
+ break;
+
+ case 0x16: /* SYS_DMA_CEI_CH0 */
+ ch->element_index[0] = (int16_t) value;
+ break;
+
+ case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
+ OMAP_RO_REG((hwaddr) reg);
+ break;
+
+ case 0x1c: /* DMA_CDEI */
+ ch->element_index[1] = (int16_t) value;
+ break;
+
+ case 0x1e: /* DMA_CDFI */
+ ch->frame_index[1] = (int16_t) value;
+ break;
+
+ case 0x20: /* DMA_COLOR_L */
+ ch->color &= 0xffff0000;
+ ch->color |= value;
+ break;
+
+ case 0x22: /* DMA_COLOR_U */
+ ch->color &= 0xffff;
+ ch->color |= (uint32_t)value << 16;
+ break;
+
+ case 0x24: /* DMA_CCR2 */
+ ch->bs = (value >> 2) & 0x1;
+ ch->transparent_copy = (value >> 1) & 0x1;
+ ch->constant_fill = value & 0x1;
+ break;
+
+ case 0x28: /* DMA_CLNK_CTRL */
+ ch->link_enabled = (value >> 15) & 0x1;
+ if (value & (1 << 14)) { /* Stop_Lnk */
+ ch->link_enabled = 0;
+ omap_dma_disable_channel(s, ch);
+ }
+ ch->link_next_ch = value & 0x1f;
+ break;
+
+ case 0x2a: /* DMA_LCH_CTRL */
+ ch->interleave_disabled = (value >> 15) & 0x1;
+ ch->type = value & 0xf;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
+ uint16_t value)
+{
+ switch (offset) {
+ case 0xbc0: /* DMA_LCD_CSDP */
+ s->brust_f2 = (value >> 14) & 0x3;
+ s->pack_f2 = (value >> 13) & 0x1;
+ s->data_type_f2 = (1 << ((value >> 11) & 0x3));
+ s->brust_f1 = (value >> 7) & 0x3;
+ s->pack_f1 = (value >> 6) & 0x1;
+ s->data_type_f1 = (1 << ((value >> 0) & 0x3));
+ break;
+
+ case 0xbc2: /* DMA_LCD_CCR */
+ s->mode_f2 = (value >> 14) & 0x3;
+ s->mode_f1 = (value >> 12) & 0x3;
+ s->end_prog = (value >> 11) & 0x1;
+ s->omap_3_1_compatible_disable = (value >> 10) & 0x1;
+ s->repeat = (value >> 9) & 0x1;
+ s->auto_init = (value >> 8) & 0x1;
+ s->running = (value >> 7) & 0x1;
+ s->priority = (value >> 6) & 0x1;
+ s->bs = (value >> 4) & 0x1;
+ break;
+
+ case 0xbc4: /* DMA_LCD_CTRL */
+ s->dst = (value >> 8) & 0x1;
+ s->src = ((value >> 6) & 0x3) << 1;
+ s->condition = 0;
+ /* Assume no bus errors and thus no BUS_ERROR irq bits. */
+ s->interrupts = (value >> 1) & 1;
+ s->dual = value & 1;
+ break;
+
+ case 0xbc8: /* TOP_B1_L */
+ s->src_f1_top &= 0xffff0000;
+ s->src_f1_top |= 0x0000ffff & value;
+ break;
+
+ case 0xbca: /* TOP_B1_U */
+ s->src_f1_top &= 0x0000ffff;
+ s->src_f1_top |= (uint32_t)value << 16;
+ break;
+
+ case 0xbcc: /* BOT_B1_L */
+ s->src_f1_bottom &= 0xffff0000;
+ s->src_f1_bottom |= 0x0000ffff & value;
+ break;
+
+ case 0xbce: /* BOT_B1_U */
+ s->src_f1_bottom &= 0x0000ffff;
+ s->src_f1_bottom |= (uint32_t) value << 16;
+ break;
+
+ case 0xbd0: /* TOP_B2_L */
+ s->src_f2_top &= 0xffff0000;
+ s->src_f2_top |= 0x0000ffff & value;
+ break;
+
+ case 0xbd2: /* TOP_B2_U */
+ s->src_f2_top &= 0x0000ffff;
+ s->src_f2_top |= (uint32_t) value << 16;
+ break;
+
+ case 0xbd4: /* BOT_B2_L */
+ s->src_f2_bottom &= 0xffff0000;
+ s->src_f2_bottom |= 0x0000ffff & value;
+ break;
+
+ case 0xbd6: /* BOT_B2_U */
+ s->src_f2_bottom &= 0x0000ffff;
+ s->src_f2_bottom |= (uint32_t) value << 16;
+ break;
+
+ case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
+ s->element_index_f1 = value;
+ break;
+
+ case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
+ s->frame_index_f1 &= 0xffff0000;
+ s->frame_index_f1 |= 0x0000ffff & value;
+ break;
+
+ case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
+ s->frame_index_f1 &= 0x0000ffff;
+ s->frame_index_f1 |= (uint32_t) value << 16;
+ break;
+
+ case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
+ s->element_index_f2 = value;
+ break;
+
+ case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
+ s->frame_index_f2 &= 0xffff0000;
+ s->frame_index_f2 |= 0x0000ffff & value;
+ break;
+
+ case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
+ s->frame_index_f2 &= 0x0000ffff;
+ s->frame_index_f2 |= (uint32_t) value << 16;
+ break;
+
+ case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
+ s->elements_f1 = value;
+ break;
+
+ case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
+ s->frames_f1 = value;
+ break;
+
+ case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
+ s->elements_f2 = value;
+ break;
+
+ case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
+ s->frames_f2 = value;
+ break;
+
+ case 0xbea: /* DMA_LCD_LCH_CTRL */
+ s->lch_type = value & 0xf;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
+ uint16_t *ret)
+{
+ switch (offset) {
+ case 0xbc0: /* DMA_LCD_CSDP */
+ *ret = (s->brust_f2 << 14) |
+ (s->pack_f2 << 13) |
+ ((s->data_type_f2 >> 1) << 11) |
+ (s->brust_f1 << 7) |
+ (s->pack_f1 << 6) |
+ ((s->data_type_f1 >> 1) << 0);
+ break;
+
+ case 0xbc2: /* DMA_LCD_CCR */
+ *ret = (s->mode_f2 << 14) |
+ (s->mode_f1 << 12) |
+ (s->end_prog << 11) |
+ (s->omap_3_1_compatible_disable << 10) |
+ (s->repeat << 9) |
+ (s->auto_init << 8) |
+ (s->running << 7) |
+ (s->priority << 6) |
+ (s->bs << 4);
+ break;
+
+ case 0xbc4: /* DMA_LCD_CTRL */
+ qemu_irq_lower(s->irq);
+ *ret = (s->dst << 8) |
+ ((s->src & 0x6) << 5) |
+ (s->condition << 3) |
+ (s->interrupts << 1) |
+ s->dual;
+ break;
+
+ case 0xbc8: /* TOP_B1_L */
+ *ret = s->src_f1_top & 0xffff;
+ break;
+
+ case 0xbca: /* TOP_B1_U */
+ *ret = s->src_f1_top >> 16;
+ break;
+
+ case 0xbcc: /* BOT_B1_L */
+ *ret = s->src_f1_bottom & 0xffff;
+ break;
+
+ case 0xbce: /* BOT_B1_U */
+ *ret = s->src_f1_bottom >> 16;
+ break;
+
+ case 0xbd0: /* TOP_B2_L */
+ *ret = s->src_f2_top & 0xffff;
+ break;
+
+ case 0xbd2: /* TOP_B2_U */
+ *ret = s->src_f2_top >> 16;
+ break;
+
+ case 0xbd4: /* BOT_B2_L */
+ *ret = s->src_f2_bottom & 0xffff;
+ break;
+
+ case 0xbd6: /* BOT_B2_U */
+ *ret = s->src_f2_bottom >> 16;
+ break;
+
+ case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
+ *ret = s->element_index_f1;
+ break;
+
+ case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
+ *ret = s->frame_index_f1 & 0xffff;
+ break;
+
+ case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
+ *ret = s->frame_index_f1 >> 16;
+ break;
+
+ case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
+ *ret = s->element_index_f2;
+ break;
+
+ case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
+ *ret = s->frame_index_f2 & 0xffff;
+ break;
+
+ case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
+ *ret = s->frame_index_f2 >> 16;
+ break;
+
+ case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
+ *ret = s->elements_f1;
+ break;
+
+ case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
+ *ret = s->frames_f1;
+ break;
+
+ case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
+ *ret = s->elements_f2;
+ break;
+
+ case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
+ *ret = s->frames_f2;
+ break;
+
+ case 0xbea: /* DMA_LCD_LCH_CTRL */
+ *ret = s->lch_type;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
+ uint16_t value)
+{
+ switch (offset) {
+ case 0x300: /* SYS_DMA_LCD_CTRL */
+ s->src = (value & 0x40) ? imif : emiff;
+ s->condition = 0;
+ /* Assume no bus errors and thus no BUS_ERROR irq bits. */
+ s->interrupts = (value >> 1) & 1;
+ s->dual = value & 1;
+ break;
+
+ case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
+ s->src_f1_top &= 0xffff0000;
+ s->src_f1_top |= 0x0000ffff & value;
+ break;
+
+ case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
+ s->src_f1_top &= 0x0000ffff;
+ s->src_f1_top |= (uint32_t)value << 16;
+ break;
+
+ case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
+ s->src_f1_bottom &= 0xffff0000;
+ s->src_f1_bottom |= 0x0000ffff & value;
+ break;
+
+ case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
+ s->src_f1_bottom &= 0x0000ffff;
+ s->src_f1_bottom |= (uint32_t)value << 16;
+ break;
+
+ case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
+ s->src_f2_top &= 0xffff0000;
+ s->src_f2_top |= 0x0000ffff & value;
+ break;
+
+ case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
+ s->src_f2_top &= 0x0000ffff;
+ s->src_f2_top |= (uint32_t)value << 16;
+ break;
+
+ case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
+ s->src_f2_bottom &= 0xffff0000;
+ s->src_f2_bottom |= 0x0000ffff & value;
+ break;
+
+ case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
+ s->src_f2_bottom &= 0x0000ffff;
+ s->src_f2_bottom |= (uint32_t)value << 16;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
+ uint16_t *ret)
+{
+ int i;
+
+ switch (offset) {
+ case 0x300: /* SYS_DMA_LCD_CTRL */
+ i = s->condition;
+ s->condition = 0;
+ qemu_irq_lower(s->irq);
+ *ret = ((s->src == imif) << 6) | (i << 3) |
+ (s->interrupts << 1) | s->dual;
+ break;
+
+ case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
+ *ret = s->src_f1_top & 0xffff;
+ break;
+
+ case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
+ *ret = s->src_f1_top >> 16;
+ break;
+
+ case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
+ *ret = s->src_f1_bottom & 0xffff;
+ break;
+
+ case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
+ *ret = s->src_f1_bottom >> 16;
+ break;
+
+ case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
+ *ret = s->src_f2_top & 0xffff;
+ break;
+
+ case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
+ *ret = s->src_f2_top >> 16;
+ break;
+
+ case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
+ *ret = s->src_f2_bottom & 0xffff;
+ break;
+
+ case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
+ *ret = s->src_f2_bottom >> 16;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
+{
+ switch (offset) {
+ case 0x400: /* SYS_DMA_GCR */
+ s->gcr = value;
+ break;
+
+ case 0x404: /* DMA_GSCR */
+ if (value & 0x8)
+ omap_dma_disable_3_1_mapping(s);
+ else
+ omap_dma_enable_3_1_mapping(s);
+ break;
+
+ case 0x408: /* DMA_GRST */
+ if (value & 0x1)
+ omap_dma_reset(s->dma);
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static int omap_dma_sys_read(struct omap_dma_s *s, int offset,
+ uint16_t *ret)
+{
+ switch (offset) {
+ case 0x400: /* SYS_DMA_GCR */
+ *ret = s->gcr;
+ break;
+
+ case 0x404: /* DMA_GSCR */
+ *ret = s->omap_3_1_mapping_disabled << 3;
+ break;
+
+ case 0x408: /* DMA_GRST */
+ *ret = 0;
+ break;
+
+ case 0x442: /* DMA_HW_ID */
+ case 0x444: /* DMA_PCh2_ID */
+ case 0x446: /* DMA_PCh0_ID */
+ case 0x448: /* DMA_PCh1_ID */
+ case 0x44a: /* DMA_PChG_ID */
+ case 0x44c: /* DMA_PChD_ID */
+ *ret = 1;
+ break;
+
+ case 0x44e: /* DMA_CAPS_0_U */
+ *ret = (s->caps[0] >> 16) & 0xffff;
+ break;
+ case 0x450: /* DMA_CAPS_0_L */
+ *ret = (s->caps[0] >> 0) & 0xffff;
+ break;
+
+ case 0x452: /* DMA_CAPS_1_U */
+ *ret = (s->caps[1] >> 16) & 0xffff;
+ break;
+ case 0x454: /* DMA_CAPS_1_L */
+ *ret = (s->caps[1] >> 0) & 0xffff;
+ break;
+
+ case 0x456: /* DMA_CAPS_2 */
+ *ret = s->caps[2];
+ break;
+
+ case 0x458: /* DMA_CAPS_3 */
+ *ret = s->caps[3];
+ break;
+
+ case 0x45a: /* DMA_CAPS_4 */
+ *ret = s->caps[4];
+ break;
+
+ case 0x460: /* DMA_PCh2_SR */
+ case 0x480: /* DMA_PCh0_SR */
+ case 0x482: /* DMA_PCh1_SR */
+ case 0x4c0: /* DMA_PChD_SR_0 */
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Physical Channel Status Registers not implemented\n",
+ __func__);
+ *ret = 0xff;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+static uint64_t omap_dma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+ int reg, ch;
+ uint16_t ret;
+
+ if (size != 2) {
+ return omap_badwidth_read16(opaque, addr);
+ }
+
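+    /*
+     * Address map: 0x000-0x2fe per-channel registers (one 0x40 window
+     * per channel), 0x300-0x3fe the OMAP 3.1 LCD channel (falling
+     * through to the channel registers once the 3.1 mapping is
+     * disabled), 0x400-0x4fe global registers, 0xb00-0xbfe the
+     * OMAP 3.2 LCD channel.
+     */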
+ switch (addr) {
+ case 0x300 ... 0x3fe:
+ if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
+ if (omap_dma_3_1_lcd_read(&s->lcd_ch, addr, &ret))
+ break;
+ return ret;
+ }
+ /* Fall through. */
+ case 0x000 ... 0x2fe:
+ reg = addr & 0x3f;
+ ch = (addr >> 6) & 0x0f;
+ if (omap_dma_ch_reg_read(s, &s->ch[ch], reg, &ret))
+ break;
+ return ret;
+
+ case 0x404 ... 0x4fe:
+ if (s->model <= omap_dma_3_1)
+ break;
+ /* Fall through. */
+ case 0x400:
+ if (omap_dma_sys_read(s, addr, &ret))
+ break;
+ return ret;
+
+ case 0xb00 ... 0xbfe:
+ if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
+ if (omap_dma_3_2_lcd_read(&s->lcd_ch, addr, &ret))
+ break;
+ return ret;
+ }
+ break;
+ }
+
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap_dma_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+ int reg, ch;
+
+ if (size != 2) {
+ omap_badwidth_write16(opaque, addr, value);
+ return;
+ }
+
+ switch (addr) {
+ case 0x300 ... 0x3fe:
+ if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
+ if (omap_dma_3_1_lcd_write(&s->lcd_ch, addr, value))
+ break;
+ return;
+ }
+ /* Fall through. */
+ case 0x000 ... 0x2fe:
+ reg = addr & 0x3f;
+ ch = (addr >> 6) & 0x0f;
+ if (omap_dma_ch_reg_write(s, &s->ch[ch], reg, value))
+ break;
+ return;
+
+ case 0x404 ... 0x4fe:
+ if (s->model <= omap_dma_3_1)
+ break;
+ /* fall through */
+ case 0x400:
+ if (omap_dma_sys_write(s, addr, value))
+ break;
+ return;
+
+ case 0xb00 ... 0xbfe:
+ if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
+ if (omap_dma_3_2_lcd_write(&s->lcd_ch, addr, value))
+ break;
+ return;
+ }
+ break;
+ }
+
+ OMAP_BAD_REG(addr);
+}
+
+static const MemoryRegionOps omap_dma_ops = {
+ .read = omap_dma_read,
+ .write = omap_dma_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void omap_dma_request(void *opaque, int drq, int req)
+{
+ struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+ /* The request pins are level triggered in QEMU. */
+ if (req) {
+ if (~s->dma->drqbmp & (1ULL << drq)) {
+ s->dma->drqbmp |= 1ULL << drq;
+ omap_dma_process_request(s, drq);
+ }
+ } else
+ s->dma->drqbmp &= ~(1ULL << drq);
+}
+
+/* XXX: this won't be needed once soc_dma knows about clocks. */
+static void omap_dma_clk_update(void *opaque, int line, int on)
+{
+ struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+ int i;
+
+ s->dma->freq = omap_clk_getrate(s->clk);
+
+ for (i = 0; i < s->chans; i ++)
+ if (s->ch[i].active)
+ soc_dma_set_request(s->ch[i].dma, on);
+}
+
+static void omap_dma_setcaps(struct omap_dma_s *s)
+{
+ switch (s->model) {
+ default:
+ case omap_dma_3_1:
+ break;
+ case omap_dma_3_2:
+ case omap_dma_4:
+ /* XXX Only available for sDMA */
+ s->caps[0] =
+ (1 << 19) | /* Constant Fill Capability */
+ (1 << 18); /* Transparent BLT Capability */
+ s->caps[1] =
+ (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */
+ s->caps[2] =
+ (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
+ (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */
+ (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */
+ (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */
+ (1 << 4) | /* DST_CONST_ADRS_CPBLTY */
+ (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
+ (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */
+ (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */
+ (1 << 0); /* SRC_CONST_ADRS_CPBLTY */
+ s->caps[3] =
+ (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
+ (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */
+ (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */
+ (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */
+ (1 << 3) | /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
+ (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
+ (1 << 1) | /* FRAME_SYNCHR_CPBLTY */
+ (1 << 0); /* ELMNT_SYNCHR_CPBLTY */
+ s->caps[4] =
+ (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
+ (1 << 6) | /* SYNC_STATUS_CPBLTY */
+ (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */
+ (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */
+ (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */
+ (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */
+ (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */
+ (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
+ break;
+ }
+}
+
+struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
+ MemoryRegion *sysmem,
+ qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
+ enum omap_dma_model model)
+{
+ int num_irqs, memsize, i;
+ struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);
+
+ if (model <= omap_dma_3_1) {
+ num_irqs = 6;
+ memsize = 0x800;
+ } else {
+ num_irqs = 16;
+ memsize = 0xc00;
+ }
+ s->model = model;
+ s->mpu = mpu;
+ s->clk = clk;
+ s->lcd_ch.irq = lcd_irq;
+ s->lcd_ch.mpu = mpu;
+
+ s->dma = soc_dma_init((model <= omap_dma_3_1) ? 9 : 16);
+ s->dma->freq = omap_clk_getrate(clk);
+ s->dma->transfer_fn = omap_dma_transfer_generic;
+ s->dma->setup_fn = omap_dma_transfer_setup;
+ s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
+ s->dma->opaque = s;
+
+ while (num_irqs --)
+ s->ch[num_irqs].irq = irqs[num_irqs];
+ for (i = 0; i < 3; i ++) {
+ s->ch[i].sibling = &s->ch[i + 6];
+ s->ch[i + 6].sibling = &s->ch[i];
+ }
+ for (i = (model <= omap_dma_3_1) ? 8 : 15; i >= 0; i --) {
+ s->ch[i].dma = &s->dma->ch[i];
+ s->dma->ch[i].opaque = &s->ch[i];
+ }
+
+ omap_dma_setcaps(s);
+ omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
+ omap_dma_reset(s->dma);
+ omap_dma_clk_update(s, 0, 1);
+
+ memory_region_init_io(&s->iomem, NULL, &omap_dma_ops, s, "omap.dma", memsize);
+ memory_region_add_subregion(sysmem, base, &s->iomem);
+
+ mpu->drq = s->dma->drq;
+
+ return s->dma;
+}
+
+static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
+{
+ struct omap_dma_channel_s *ch = s->ch;
+ uint32_t bmp, bit;
+
+ for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1)
+ if (ch->status) {
+ bmp |= bit;
+ ch->cstatus |= ch->status;
+ ch->status = 0;
+ }
+ if ((s->irqstat[0] |= s->irqen[0] & bmp))
+ qemu_irq_raise(s->irq[0]);
+ if ((s->irqstat[1] |= s->irqen[1] & bmp))
+ qemu_irq_raise(s->irq[1]);
+ if ((s->irqstat[2] |= s->irqen[2] & bmp))
+ qemu_irq_raise(s->irq[2]);
+ if ((s->irqstat[3] |= s->irqen[3] & bmp))
+ qemu_irq_raise(s->irq[3]);
+}
+
+static uint64_t omap_dma4_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+ int irqn = 0, chnum;
+ struct omap_dma_channel_s *ch;
+
+ if (size == 1) {
+ return omap_badwidth_read16(opaque, addr);
+ }
+
+ switch (addr) {
+ case 0x00: /* DMA4_REVISION */
+ return 0x40;
+
+ case 0x14: /* DMA4_IRQSTATUS_L3 */
+ irqn ++;
+ /* fall through */
+ case 0x10: /* DMA4_IRQSTATUS_L2 */
+ irqn ++;
+ /* fall through */
+ case 0x0c: /* DMA4_IRQSTATUS_L1 */
+ irqn ++;
+ /* fall through */
+ case 0x08: /* DMA4_IRQSTATUS_L0 */
+ return s->irqstat[irqn];
+
+ case 0x24: /* DMA4_IRQENABLE_L3 */
+ irqn ++;
+ /* fall through */
+ case 0x20: /* DMA4_IRQENABLE_L2 */
+ irqn ++;
+ /* fall through */
+ case 0x1c: /* DMA4_IRQENABLE_L1 */
+ irqn ++;
+ /* fall through */
+ case 0x18: /* DMA4_IRQENABLE_L0 */
+ return s->irqen[irqn];
+
+ case 0x28: /* DMA4_SYSSTATUS */
+ return 1; /* RESETDONE */
+
+ case 0x2c: /* DMA4_OCP_SYSCONFIG */
+ return s->ocp;
+
+ case 0x64: /* DMA4_CAPS_0 */
+ return s->caps[0];
+ case 0x6c: /* DMA4_CAPS_2 */
+ return s->caps[2];
+ case 0x70: /* DMA4_CAPS_3 */
+ return s->caps[3];
+ case 0x74: /* DMA4_CAPS_4 */
+ return s->caps[4];
+
+ case 0x78: /* DMA4_GCR */
+ return s->gcr;
+
+ case 0x80 ... 0xfff:
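+        /* Per-channel registers: channel n occupies a 0x60-byte window
+         * starting at offset 0x80 + n * 0x60. */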
+ addr -= 0x80;
+ chnum = addr / 0x60;
+ ch = s->ch + chnum;
+ addr -= chnum * 0x60;
+ break;
+
+ default:
+ OMAP_BAD_REG(addr);
+ return 0;
+ }
+
+ /* Per-channel registers */
+ switch (addr) {
+ case 0x00: /* DMA4_CCR */
+ return (ch->buf_disable << 25) |
+ (ch->src_sync << 24) |
+ (ch->prefetch << 23) |
+ ((ch->sync & 0x60) << 14) |
+ (ch->bs << 18) |
+ (ch->transparent_copy << 17) |
+ (ch->constant_fill << 16) |
+ (ch->mode[1] << 14) |
+ (ch->mode[0] << 12) |
+ (0 << 10) | (0 << 9) |
+ (ch->suspend << 8) |
+ (ch->enable << 7) |
+ (ch->priority << 6) |
+ (ch->fs << 5) | (ch->sync & 0x1f);
+
+ case 0x04: /* DMA4_CLNK_CTRL */
+ return (ch->link_enabled << 15) | ch->link_next_ch;
+
+ case 0x08: /* DMA4_CICR */
+ return ch->interrupts;
+
+ case 0x0c: /* DMA4_CSR */
+ return ch->cstatus;
+
+ case 0x10: /* DMA4_CSDP */
+ return (ch->endian[0] << 21) |
+ (ch->endian_lock[0] << 20) |
+ (ch->endian[1] << 19) |
+ (ch->endian_lock[1] << 18) |
+ (ch->write_mode << 16) |
+ (ch->burst[1] << 14) |
+ (ch->pack[1] << 13) |
+ (ch->translate[1] << 9) |
+ (ch->burst[0] << 7) |
+ (ch->pack[0] << 6) |
+ (ch->translate[0] << 2) |
+ (ch->data_type >> 1);
+
+ case 0x14: /* DMA4_CEN */
+ return ch->elements;
+
+ case 0x18: /* DMA4_CFN */
+ return ch->frames;
+
+ case 0x1c: /* DMA4_CSSA */
+ return ch->addr[0];
+
+ case 0x20: /* DMA4_CDSA */
+ return ch->addr[1];
+
+ case 0x24: /* DMA4_CSEI */
+ return ch->element_index[0];
+
+ case 0x28: /* DMA4_CSFI */
+ return ch->frame_index[0];
+
+ case 0x2c: /* DMA4_CDEI */
+ return ch->element_index[1];
+
+ case 0x30: /* DMA4_CDFI */
+ return ch->frame_index[1];
+
+ case 0x34: /* DMA4_CSAC */
+ return ch->active_set.src & 0xffff;
+
+ case 0x38: /* DMA4_CDAC */
+ return ch->active_set.dest & 0xffff;
+
+ case 0x3c: /* DMA4_CCEN */
+ return ch->active_set.element;
+
+ case 0x40: /* DMA4_CCFN */
+ return ch->active_set.frame;
+
+ case 0x44: /* DMA4_COLOR */
+ /* XXX only in sDMA */
+ return ch->color;
+
+ default:
+ OMAP_BAD_REG(addr);
+ return 0;
+ }
+}
+
+static void omap_dma4_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_dma_s *s = (struct omap_dma_s *) opaque;
+ int chnum, irqn = 0;
+ struct omap_dma_channel_s *ch;
+
+ if (size == 1) {
+ omap_badwidth_write16(opaque, addr, value);
+ return;
+ }
+
+ switch (addr) {
+ case 0x14: /* DMA4_IRQSTATUS_L3 */
+ irqn ++;
+ /* fall through */
+ case 0x10: /* DMA4_IRQSTATUS_L2 */
+ irqn ++;
+ /* fall through */
+ case 0x0c: /* DMA4_IRQSTATUS_L1 */
+ irqn ++;
+ /* fall through */
+ case 0x08: /* DMA4_IRQSTATUS_L0 */
+ s->irqstat[irqn] &= ~value;
+ if (!s->irqstat[irqn])
+ qemu_irq_lower(s->irq[irqn]);
+ return;
+
+ case 0x24: /* DMA4_IRQENABLE_L3 */
+ irqn ++;
+ /* fall through */
+ case 0x20: /* DMA4_IRQENABLE_L2 */
+ irqn ++;
+ /* fall through */
+ case 0x1c: /* DMA4_IRQENABLE_L1 */
+ irqn ++;
+ /* fall through */
+ case 0x18: /* DMA4_IRQENABLE_L0 */
+ s->irqen[irqn] = value;
+ return;
+
+ case 0x2c: /* DMA4_OCP_SYSCONFIG */
+ if (value & 2) /* SOFTRESET */
+ omap_dma_reset(s->dma);
+ s->ocp = value & 0x3321;
+ if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n",
+ __func__);
+ }
+ return;
+
+ case 0x78: /* DMA4_GCR */
+ s->gcr = value & 0x00ff00ff;
+ if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n",
+ __func__);
+ }
+ return;
+
+ case 0x80 ... 0xfff:
+ addr -= 0x80;
+ chnum = addr / 0x60;
+ ch = s->ch + chnum;
+ addr -= chnum * 0x60;
+ break;
+
+ case 0x00: /* DMA4_REVISION */
+ case 0x28: /* DMA4_SYSSTATUS */
+ case 0x64: /* DMA4_CAPS_0 */
+ case 0x6c: /* DMA4_CAPS_2 */
+ case 0x70: /* DMA4_CAPS_3 */
+ case 0x74: /* DMA4_CAPS_4 */
+ OMAP_RO_REG(addr);
+ return;
+
+ default:
+ OMAP_BAD_REG(addr);
+ return;
+ }
+
+ /* Per-channel registers */
+ switch (addr) {
+ case 0x00: /* DMA4_CCR */
+ ch->buf_disable = (value >> 25) & 1;
+ ch->src_sync = (value >> 24) & 1; /* XXX For CamDMA must be 1 */
+ if (ch->buf_disable && !ch->src_sync) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Buffering disable is not allowed in "
+ "destination synchronised mode\n", __func__);
+ }
+ ch->prefetch = (value >> 23) & 1;
+ ch->bs = (value >> 18) & 1;
+ ch->transparent_copy = (value >> 17) & 1;
+ ch->constant_fill = (value >> 16) & 1;
+ ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
+ ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
+ ch->suspend = (value & 0x0100) >> 8;
+ ch->priority = (value & 0x0040) >> 6;
+ ch->fs = (value & 0x0020) >> 5;
+ if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: For a packet transfer at least one port "
+ "must be constant-addressed\n", __func__);
+ }
+ ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060);
+ /* XXX must be 0x01 for CamDMA */
+
+ if (value & 0x0080)
+ omap_dma_enable_channel(s, ch);
+ else
+ omap_dma_disable_channel(s, ch);
+
+ break;
+
+ case 0x04: /* DMA4_CLNK_CTRL */
+ ch->link_enabled = (value >> 15) & 0x1;
+ ch->link_next_ch = value & 0x1f;
+ break;
+
+ case 0x08: /* DMA4_CICR */
+ ch->interrupts = value & 0x09be;
+ break;
+
+ case 0x0c: /* DMA4_CSR */
+ ch->cstatus &= ~value;
+ break;
+
+ case 0x10: /* DMA4_CSDP */
+        ch->endian[0] = (value >> 21) & 1;
+        ch->endian_lock[0] = (value >> 20) & 1;
+        ch->endian[1] = (value >> 19) & 1;
+        ch->endian_lock[1] = (value >> 18) & 1;
+ if (ch->endian[0] != ch->endian[1]) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: DMA endianness conversion enable attempt\n",
+ __func__);
+ }
+ ch->write_mode = (value >> 16) & 3;
+ ch->burst[1] = (value & 0xc000) >> 14;
+ ch->pack[1] = (value & 0x2000) >> 13;
+ ch->translate[1] = (value & 0x1e00) >> 9;
+ ch->burst[0] = (value & 0x0180) >> 7;
+ ch->pack[0] = (value & 0x0040) >> 6;
+ ch->translate[0] = (value & 0x003c) >> 2;
+ if (ch->translate[0] | ch->translate[1]) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: bad MReqAddressTranslate sideband signal\n",
+ __func__);
+ }
+ ch->data_type = 1 << (value & 3);
+ if ((value & 3) == 3) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: bad data_type for DMA channel\n", __func__);
+ ch->data_type >>= 1;
+ }
+ break;
+
+ case 0x14: /* DMA4_CEN */
+ ch->set_update = 1;
+ ch->elements = value & 0xffffff;
+ break;
+
+ case 0x18: /* DMA4_CFN */
+ ch->frames = value & 0xffff;
+ ch->set_update = 1;
+ break;
+
+ case 0x1c: /* DMA4_CSSA */
+ ch->addr[0] = (hwaddr) (uint32_t) value;
+ ch->set_update = 1;
+ break;
+
+ case 0x20: /* DMA4_CDSA */
+ ch->addr[1] = (hwaddr) (uint32_t) value;
+ ch->set_update = 1;
+ break;
+
+ case 0x24: /* DMA4_CSEI */
+ ch->element_index[0] = (int16_t) value;
+ ch->set_update = 1;
+ break;
+
+ case 0x28: /* DMA4_CSFI */
+ ch->frame_index[0] = (int32_t) value;
+ ch->set_update = 1;
+ break;
+
+ case 0x2c: /* DMA4_CDEI */
+ ch->element_index[1] = (int16_t) value;
+ ch->set_update = 1;
+ break;
+
+ case 0x30: /* DMA4_CDFI */
+ ch->frame_index[1] = (int32_t) value;
+ ch->set_update = 1;
+ break;
+
+ case 0x44: /* DMA4_COLOR */
+ /* XXX only in sDMA */
+ ch->color = value;
+ break;
+
+ case 0x34: /* DMA4_CSAC */
+ case 0x38: /* DMA4_CDAC */
+ case 0x3c: /* DMA4_CCEN */
+ case 0x40: /* DMA4_CCFN */
+ OMAP_RO_REG(addr);
+ break;
+
+ default:
+ OMAP_BAD_REG(addr);
+ }
+}
+
+static const MemoryRegionOps omap_dma4_ops = {
+ .read = omap_dma4_read,
+ .write = omap_dma4_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
+ MemoryRegion *sysmem,
+ struct omap_mpu_state_s *mpu, int fifo,
+ int chans, omap_clk iclk, omap_clk fclk)
+{
+ int i;
+ struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);
+
+ s->model = omap_dma_4;
+ s->chans = chans;
+ s->mpu = mpu;
+ s->clk = fclk;
+
+ s->dma = soc_dma_init(s->chans);
+ s->dma->freq = omap_clk_getrate(fclk);
+ s->dma->transfer_fn = omap_dma_transfer_generic;
+ s->dma->setup_fn = omap_dma_transfer_setup;
+ s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
+ s->dma->opaque = s;
+ for (i = 0; i < s->chans; i ++) {
+ s->ch[i].dma = &s->dma->ch[i];
+ s->dma->ch[i].opaque = &s->ch[i];
+ }
+
+ memcpy(&s->irq, irqs, sizeof(s->irq));
+ s->intr_update = omap_dma_interrupts_4_update;
+
+ omap_dma_setcaps(s);
+ omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
+ omap_dma_reset(s->dma);
+ omap_dma_clk_update(s, 0, !!s->dma->freq);
+
+ memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000);
+ memory_region_add_subregion(sysmem, base, &s->iomem);
+
+ mpu->drq = s->dma->drq;
+
+ return s->dma;
+}
+
+struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
+{
+ struct omap_dma_s *s = dma->opaque;
+
+ return &s->lcd_ch;
+}
diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c
new file mode 100644
index 000000000..2627307cc
--- /dev/null
+++ b/hw/dma/pl080.c
@@ -0,0 +1,449 @@
+/*
+ * Arm PrimeCell PL080/PL081 DMA controller
+ *
+ * Copyright (c) 2006 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/dma/pl080.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+
+#define PL080_CONF_E 0x1
+#define PL080_CONF_M1 0x2
+#define PL080_CONF_M2 0x4
+
+#define PL080_CCONF_H 0x40000
+#define PL080_CCONF_A 0x20000
+#define PL080_CCONF_L 0x10000
+#define PL080_CCONF_ITC 0x08000
+#define PL080_CCONF_IE 0x04000
+#define PL080_CCONF_E 0x00001
+
+#define PL080_CCTRL_I 0x80000000
+#define PL080_CCTRL_DI 0x08000000
+#define PL080_CCTRL_SI 0x04000000
+#define PL080_CCTRL_D 0x02000000
+#define PL080_CCTRL_S 0x01000000
+
+static const VMStateDescription vmstate_pl080_channel = {
+ .name = "pl080_channel",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(src, pl080_channel),
+ VMSTATE_UINT32(dest, pl080_channel),
+ VMSTATE_UINT32(lli, pl080_channel),
+ VMSTATE_UINT32(ctrl, pl080_channel),
+ VMSTATE_UINT32(conf, pl080_channel),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_pl080 = {
+ .name = "pl080",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(tc_int, PL080State),
+ VMSTATE_UINT8(tc_mask, PL080State),
+ VMSTATE_UINT8(err_int, PL080State),
+ VMSTATE_UINT8(err_mask, PL080State),
+ VMSTATE_UINT32(conf, PL080State),
+ VMSTATE_UINT32(sync, PL080State),
+ VMSTATE_UINT32(req_single, PL080State),
+ VMSTATE_UINT32(req_burst, PL080State),
+ VMSTATE_UINT8(tc_int, PL080State),
+ VMSTATE_UINT8(tc_int, PL080State),
+ VMSTATE_UINT8(tc_int, PL080State),
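+        /* The three duplicated tc_int entries above look accidental but
+         * are kept as-is: dropping them would change the migration
+         * stream layout. */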
+ VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS,
+ 1, vmstate_pl080_channel, pl080_channel),
+ VMSTATE_INT32(running, PL080State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const unsigned char pl080_id[] =
+{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
+
+static const unsigned char pl081_id[] =
+{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
+
+static void pl080_update(PL080State *s)
+{
+ bool tclevel = (s->tc_int & s->tc_mask);
+ bool errlevel = (s->err_int & s->err_mask);
+
+ qemu_set_irq(s->interr, errlevel);
+ qemu_set_irq(s->inttc, tclevel);
+ qemu_set_irq(s->irq, errlevel || tclevel);
+}
+
+static void pl080_run(PL080State *s)
+{
+ int c;
+ int flow;
+ pl080_channel *ch;
+ int swidth;
+ int dwidth;
+ int xsize;
+ int n;
+ int src_id;
+ int dest_id;
+ int size;
+ uint8_t buff[4];
+ uint32_t req;
+
+ s->tc_mask = 0;
+ for (c = 0; c < s->nchannels; c++) {
+ if (s->chan[c].conf & PL080_CCONF_ITC)
+ s->tc_mask |= 1 << c;
+ if (s->chan[c].conf & PL080_CCONF_IE)
+ s->err_mask |= 1 << c;
+ }
+
+ if ((s->conf & PL080_CONF_E) == 0)
+ return;
+
+ /* If we are already in the middle of a DMA operation then indicate that
+ there may be new DMA requests and return immediately. */
+ if (s->running) {
+ s->running++;
+ return;
+ }
+ s->running = 1;
+ while (s->running) {
+ for (c = 0; c < s->nchannels; c++) {
+ ch = &s->chan[c];
+again:
+            /* Test if this channel has any pending DMA requests. */
+ if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
+ != PL080_CCONF_E)
+ continue;
+ flow = (ch->conf >> 11) & 7;
+ if (flow >= 4) {
+ hw_error(
+ "pl080_run: Peripheral flow control not implemented\n");
+ }
+ src_id = (ch->conf >> 1) & 0x1f;
+ dest_id = (ch->conf >> 6) & 0x1f;
+ size = ch->ctrl & 0xfff;
+ req = s->req_single | s->req_burst;
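+            /* FlowCntrl encoding as modelled here: 0 memory-to-memory,
+             * 1 memory-to-peripheral (gated on the destination request),
+             * 2 peripheral-to-memory (gated on the source request),
+             * 3 peripheral-to-peripheral (gated on both); values >= 4
+             * select peripheral flow control, rejected above. */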
+ switch (flow) {
+ case 0:
+ break;
+ case 1:
+ if ((req & (1u << dest_id)) == 0)
+ size = 0;
+ break;
+ case 2:
+ if ((req & (1u << src_id)) == 0)
+ size = 0;
+ break;
+ case 3:
+ if ((req & (1u << src_id)) == 0
+ || (req & (1u << dest_id)) == 0)
+ size = 0;
+ break;
+ }
+ if (!size)
+ continue;
+
+ /* Transfer one element. */
+ /* ??? Should transfer multiple elements for a burst request. */
+ /* ??? Unclear what the proper behavior is when source and
+ destination widths are different. */
+ swidth = 1 << ((ch->ctrl >> 18) & 7);
+ dwidth = 1 << ((ch->ctrl >> 21) & 7);
+            for (n = 0; n < dwidth; n += swidth) {
+ address_space_read(&s->downstream_as, ch->src,
+ MEMTXATTRS_UNSPECIFIED, buff + n, swidth);
+ if (ch->ctrl & PL080_CCTRL_SI)
+ ch->src += swidth;
+ }
+ xsize = (dwidth < swidth) ? swidth : dwidth;
+ /* ??? This may pad the value incorrectly for dwidth < 32. */
+ for (n = 0; n < xsize; n += dwidth) {
+ address_space_write(&s->downstream_as, ch->dest + n,
+ MEMTXATTRS_UNSPECIFIED, buff + n, dwidth);
+ if (ch->ctrl & PL080_CCTRL_DI)
+ ch->dest += swidth;
+ }
+
+ size--;
+ ch->ctrl = (ch->ctrl & 0xfffff000) | size;
+ if (size == 0) {
+ /* Transfer complete. */
+ if (ch->lli) {
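+                    /* Each linked-list item is four little-endian words:
+                     * +0 source, +4 destination, +8 next LLI and
+                     * +12 control, matching the loads below. */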
+ ch->src = address_space_ldl_le(&s->downstream_as,
+ ch->lli,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ ch->dest = address_space_ldl_le(&s->downstream_as,
+ ch->lli + 4,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ ch->ctrl = address_space_ldl_le(&s->downstream_as,
+ ch->lli + 12,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ ch->lli = address_space_ldl_le(&s->downstream_as,
+ ch->lli + 8,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ } else {
+ ch->conf &= ~PL080_CCONF_E;
+ }
+ if (ch->ctrl & PL080_CCTRL_I) {
+ s->tc_int |= 1 << c;
+ }
+ }
+ goto again;
+ }
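+        /* Fold any requests that arrived during this pass (the
+         * re-entrant path above bumps s->running) into one more scan. */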
+ if (--s->running)
+ s->running = 1;
+ }
+}
+
+static uint64_t pl080_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PL080State *s = (PL080State *)opaque;
+ uint32_t i;
+ uint32_t mask;
+
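+    /* 0xfe0-0xfff: standard PrimeCell peripheral/cell ID bytes, one per
+     * 32-bit register. */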
+ if (offset >= 0xfe0 && offset < 0x1000) {
+ if (s->nchannels == 8) {
+ return pl080_id[(offset - 0xfe0) >> 2];
+ } else {
+ return pl081_id[(offset - 0xfe0) >> 2];
+ }
+ }
+ if (offset >= 0x100 && offset < 0x200) {
+ i = (offset & 0xe0) >> 5;
+ if (i >= s->nchannels)
+ goto bad_offset;
+ switch ((offset >> 2) & 7) {
+ case 0: /* SrcAddr */
+ return s->chan[i].src;
+ case 1: /* DestAddr */
+ return s->chan[i].dest;
+ case 2: /* LLI */
+ return s->chan[i].lli;
+ case 3: /* Control */
+ return s->chan[i].ctrl;
+ case 4: /* Configuration */
+ return s->chan[i].conf;
+ default:
+ goto bad_offset;
+ }
+ }
+ switch (offset >> 2) {
+ case 0: /* IntStatus */
+ return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
+ case 1: /* IntTCStatus */
+ return (s->tc_int & s->tc_mask);
+ case 3: /* IntErrorStatus */
+ return (s->err_int & s->err_mask);
+ case 5: /* RawIntTCStatus */
+ return s->tc_int;
+ case 6: /* RawIntErrorStatus */
+ return s->err_int;
+ case 7: /* EnbldChns */
+ mask = 0;
+ for (i = 0; i < s->nchannels; i++) {
+ if (s->chan[i].conf & PL080_CCONF_E)
+ mask |= 1 << i;
+ }
+ return mask;
+ case 8: /* SoftBReq */
+ case 9: /* SoftSReq */
+ case 10: /* SoftLBReq */
+ case 11: /* SoftLSReq */
+ /* ??? Implement these. */
+ return 0;
+ case 12: /* Configuration */
+ return s->conf;
+ case 13: /* Sync */
+ return s->sync;
+ default:
+ bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pl080_read: Bad offset %x\n", (int)offset);
+ return 0;
+ }
+}
+
+static void pl080_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PL080State *s = (PL080State *)opaque;
+ int i;
+
+ if (offset >= 0x100 && offset < 0x200) {
+ i = (offset & 0xe0) >> 5;
+ if (i >= s->nchannels)
+ goto bad_offset;
+ switch ((offset >> 2) & 7) {
+ case 0: /* SrcAddr */
+ s->chan[i].src = value;
+ break;
+ case 1: /* DestAddr */
+ s->chan[i].dest = value;
+ break;
+ case 2: /* LLI */
+ s->chan[i].lli = value;
+ break;
+ case 3: /* Control */
+ s->chan[i].ctrl = value;
+ break;
+ case 4: /* Configuration */
+ s->chan[i].conf = value;
+ pl080_run(s);
+ break;
+ }
+ return;
+ }
+ switch (offset >> 2) {
+ case 2: /* IntTCClear */
+ s->tc_int &= ~value;
+ break;
+ case 4: /* IntErrorClear */
+ s->err_int &= ~value;
+ break;
+ case 8: /* SoftBReq */
+ case 9: /* SoftSReq */
+ case 10: /* SoftLBReq */
+ case 11: /* SoftLSReq */
+ /* ??? Implement these. */
+ qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
+ break;
+ case 12: /* Configuration */
+ s->conf = value;
+ if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
+ qemu_log_mask(LOG_UNIMP,
+ "pl080_write: Big-endian DMA not implemented\n");
+ }
+ pl080_run(s);
+ break;
+ case 13: /* Sync */
+ s->sync = value;
+ break;
+ default:
+ bad_offset:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pl080_write: Bad offset %x\n", (int)offset);
+ }
+ pl080_update(s);
+}
+
+static const MemoryRegionOps pl080_ops = {
+ .read = pl080_read,
+ .write = pl080_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void pl080_reset(DeviceState *dev)
+{
+ PL080State *s = PL080(dev);
+ int i;
+
+ s->tc_int = 0;
+ s->tc_mask = 0;
+ s->err_int = 0;
+ s->err_mask = 0;
+ s->conf = 0;
+ s->sync = 0;
+ s->req_single = 0;
+ s->req_burst = 0;
+ s->running = 0;
+
+ for (i = 0; i < s->nchannels; i++) {
+ s->chan[i].src = 0;
+ s->chan[i].dest = 0;
+ s->chan[i].lli = 0;
+ s->chan[i].ctrl = 0;
+ s->chan[i].conf = 0;
+ }
+}
+
+static void pl080_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ PL080State *s = PL080(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->interr);
+ sysbus_init_irq(sbd, &s->inttc);
+ s->nchannels = 8;
+}
+
+static void pl080_realize(DeviceState *dev, Error **errp)
+{
+ PL080State *s = PL080(dev);
+
+ if (!s->downstream) {
+ error_setg(errp, "PL080 'downstream' link not set");
+ return;
+ }
+
+ address_space_init(&s->downstream_as, s->downstream, "pl080-downstream");
+}
+
+static void pl081_init(Object *obj)
+{
+ PL080State *s = PL080(obj);
+
+ s->nchannels = 2;
+}
+
+static Property pl080_properties[] = {
+ DEFINE_PROP_LINK("downstream", PL080State, downstream,
+ TYPE_MEMORY_REGION, MemoryRegion *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pl080_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->vmsd = &vmstate_pl080;
+ dc->realize = pl080_realize;
+ device_class_set_props(dc, pl080_properties);
+ dc->reset = pl080_reset;
+}
+
+static const TypeInfo pl080_info = {
+ .name = TYPE_PL080,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PL080State),
+ .instance_init = pl080_init,
+ .class_init = pl080_class_init,
+};
+
+static const TypeInfo pl081_info = {
+ .name = TYPE_PL081,
+ .parent = TYPE_PL080,
+ .instance_init = pl081_init,
+};
+
+/* The PL080 and PL081 are the same except for the number of channels
+ they implement (8 and 2 respectively). */
+static void pl080_register_types(void)
+{
+ type_register_static(&pl080_info);
+ type_register_static(&pl081_info);
+}
+
+type_init(pl080_register_types)
diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c
new file mode 100644
index 000000000..0cb46191c
--- /dev/null
+++ b/hw/dma/pl330.c
@@ -0,0 +1,1702 @@
+/*
+ * ARM PrimeCell PL330 DMA Controller
+ *
+ * Copyright (c) 2009 Samsung Electronics.
+ * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
+ * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com)
+ * Copyright (c) 2012 PetaLogix Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 or later.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+#include "sysemu/dma.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "qom/object.h"
+
+#ifndef PL330_ERR_DEBUG
+#define PL330_ERR_DEBUG 0
+#endif
+
+#define PL330_PERIPH_NUM 32
+#define PL330_MAX_BURST_LEN 128
+#define PL330_INSN_MAXSIZE 6
+
+#define PL330_FIFO_OK 0
+#define PL330_FIFO_STALL 1
+#define PL330_FIFO_ERR (-1)
+
+#define PL330_FAULT_UNDEF_INSTR (1 << 0)
+#define PL330_FAULT_OPERAND_INVALID (1 << 1)
+#define PL330_FAULT_DMAGO_ERR (1 << 4)
+#define PL330_FAULT_EVENT_ERR (1 << 5)
+#define PL330_FAULT_CH_PERIPH_ERR (1 << 6)
+#define PL330_FAULT_CH_RDWR_ERR (1 << 7)
+#define PL330_FAULT_ST_DATA_UNAVAILABLE (1 << 12)
+#define PL330_FAULT_FIFOEMPTY_ERR (1 << 13)
+#define PL330_FAULT_INSTR_FETCH_ERR (1 << 16)
+#define PL330_FAULT_DATA_WRITE_ERR (1 << 17)
+#define PL330_FAULT_DATA_READ_ERR (1 << 18)
+#define PL330_FAULT_DBG_INSTR (1 << 30)
+#define PL330_FAULT_LOCKUP_ERR (1 << 31)
+
+#define PL330_UNTAGGED 0xff
+
+#define PL330_SINGLE 0x0
+#define PL330_BURST 0x1
+
+#define PL330_WATCHDOG_LIMIT 1024
+
+/* IOMEM mapped registers */
+#define PL330_REG_DSR 0x000
+#define PL330_REG_DPC 0x004
+#define PL330_REG_INTEN 0x020
+#define PL330_REG_INT_EVENT_RIS 0x024
+#define PL330_REG_INTMIS 0x028
+#define PL330_REG_INTCLR 0x02C
+#define PL330_REG_FSRD 0x030
+#define PL330_REG_FSRC 0x034
+#define PL330_REG_FTRD 0x038
+#define PL330_REG_FTR_BASE 0x040
+#define PL330_REG_CSR_BASE 0x100
+#define PL330_REG_CPC_BASE 0x104
+#define PL330_REG_CHANCTRL 0x400
+#define PL330_REG_DBGSTATUS 0xD00
+#define PL330_REG_DBGCMD 0xD04
+#define PL330_REG_DBGINST0 0xD08
+#define PL330_REG_DBGINST1 0xD0C
+#define PL330_REG_CR0_BASE 0xE00
+#define PL330_REG_PERIPH_ID 0xFE0
+
+#define PL330_IOMEM_SIZE 0x1000
+
+#define CFG_BOOT_ADDR 2
+#define CFG_INS 3
+#define CFG_PNS 4
+#define CFG_CRD 5
+
+static const uint32_t pl330_id[] = {
+ 0x30, 0x13, 0x24, 0x00, 0x0D, 0xF0, 0x05, 0xB1
+};
+
+/* DMA channel states as they are described in PL330 Technical Reference Manual
+ * Most of them will not be used in emulation.
+ */
+typedef enum {
+ pl330_chan_stopped = 0,
+ pl330_chan_executing = 1,
+ pl330_chan_cache_miss = 2,
+ pl330_chan_updating_pc = 3,
+ pl330_chan_waiting_event = 4,
+ pl330_chan_at_barrier = 5,
+ pl330_chan_queue_busy = 6,
+ pl330_chan_waiting_periph = 7,
+ pl330_chan_killing = 8,
+ pl330_chan_completing = 9,
+ pl330_chan_fault_completing = 14,
+ pl330_chan_fault = 15,
+} PL330ChanState;
+
+typedef struct PL330State PL330State;
+
+typedef struct PL330Chan {
+ uint32_t src;
+ uint32_t dst;
+ uint32_t pc;
+ uint32_t control;
+ uint32_t status;
+ uint32_t lc[2];
+ uint32_t fault_type;
+ uint32_t watchdog_timer;
+
+ bool ns;
+ uint8_t request_flag;
+ uint8_t wakeup;
+ uint8_t wfp_sbp;
+
+ uint8_t state;
+ uint8_t stall;
+
+ bool is_manager;
+ PL330State *parent;
+ uint8_t tag;
+} PL330Chan;
+
+static const VMStateDescription vmstate_pl330_chan = {
+ .name = "pl330_chan",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(src, PL330Chan),
+ VMSTATE_UINT32(dst, PL330Chan),
+ VMSTATE_UINT32(pc, PL330Chan),
+ VMSTATE_UINT32(control, PL330Chan),
+ VMSTATE_UINT32(status, PL330Chan),
+ VMSTATE_UINT32_ARRAY(lc, PL330Chan, 2),
+ VMSTATE_UINT32(fault_type, PL330Chan),
+ VMSTATE_UINT32(watchdog_timer, PL330Chan),
+ VMSTATE_BOOL(ns, PL330Chan),
+ VMSTATE_UINT8(request_flag, PL330Chan),
+ VMSTATE_UINT8(wakeup, PL330Chan),
+ VMSTATE_UINT8(wfp_sbp, PL330Chan),
+ VMSTATE_UINT8(state, PL330Chan),
+ VMSTATE_UINT8(stall, PL330Chan),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+typedef struct PL330Fifo {
+ uint8_t *buf;
+ uint8_t *tag;
+ uint32_t head;
+ uint32_t num;
+ uint32_t buf_size;
+} PL330Fifo;
+
+static const VMStateDescription vmstate_pl330_fifo = {
+ .name = "pl330_chan",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, buf_size),
+ VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, buf_size),
+ VMSTATE_UINT32(head, PL330Fifo),
+ VMSTATE_UINT32(num, PL330Fifo),
+ VMSTATE_UINT32(buf_size, PL330Fifo),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+typedef struct PL330QueueEntry {
+ uint32_t addr;
+ uint32_t len;
+ uint8_t n;
+ bool inc;
+ bool z;
+ uint8_t tag;
+ uint8_t seqn;
+} PL330QueueEntry;
+
+static const VMStateDescription vmstate_pl330_queue_entry = {
+ .name = "pl330_queue_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(addr, PL330QueueEntry),
+ VMSTATE_UINT32(len, PL330QueueEntry),
+ VMSTATE_UINT8(n, PL330QueueEntry),
+ VMSTATE_BOOL(inc, PL330QueueEntry),
+ VMSTATE_BOOL(z, PL330QueueEntry),
+ VMSTATE_UINT8(tag, PL330QueueEntry),
+ VMSTATE_UINT8(seqn, PL330QueueEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+typedef struct PL330Queue {
+ PL330State *parent;
+ PL330QueueEntry *queue;
+ uint32_t queue_size;
+} PL330Queue;
+
+static const VMStateDescription vmstate_pl330_queue = {
+ .name = "pl330_queue",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(queue, PL330Queue, queue_size,
+ vmstate_pl330_queue_entry,
+ PL330QueueEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+struct PL330State {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq_abort;
+ qemu_irq *irq;
+
+ /* Config registers. cfg[5] = CfgDn. */
+ uint32_t cfg[6];
+#define EVENT_SEC_STATE 3
+#define PERIPH_SEC_STATE 4
+ /* cfg 0 bits and pieces */
+ uint32_t num_chnls;
+ uint8_t num_periph_req;
+ uint8_t num_events;
+ uint8_t mgr_ns_at_rst;
+ /* cfg 1 bits and pieces */
+ uint8_t i_cache_len;
+ uint8_t num_i_cache_lines;
+ /* CRD bits and pieces */
+ uint8_t data_width;
+ uint8_t wr_cap;
+ uint8_t wr_q_dep;
+ uint8_t rd_cap;
+ uint8_t rd_q_dep;
+ uint16_t data_buffer_dep;
+
+ PL330Chan manager;
+ PL330Chan *chan;
+ PL330Fifo fifo;
+ PL330Queue read_queue;
+ PL330Queue write_queue;
+ uint8_t *lo_seqn;
+ uint8_t *hi_seqn;
+ QEMUTimer *timer; /* is used for restore dma. */
+
+ uint32_t inten;
+ uint32_t int_status;
+ uint32_t ev_status;
+ uint32_t dbg[2];
+ uint8_t debug_status;
+ uint8_t num_faulting;
+ uint8_t periph_busy[PL330_PERIPH_NUM];
+
+ /* Memory region that DMA operation access */
+ MemoryRegion *mem_mr;
+ AddressSpace *mem_as;
+};
+
+#define TYPE_PL330 "pl330"
+OBJECT_DECLARE_SIMPLE_TYPE(PL330State, PL330)
+
+static const VMStateDescription vmstate_pl330 = {
+ .name = "pl330",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(chan, PL330State, num_chnls,
+ vmstate_pl330_chan, PL330Chan),
+ VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, num_chnls),
+ VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, num_chnls),
+ VMSTATE_STRUCT(fifo, PL330State, 0, vmstate_pl330_fifo, PL330Fifo),
+ VMSTATE_STRUCT(read_queue, PL330State, 0, vmstate_pl330_queue,
+ PL330Queue),
+ VMSTATE_STRUCT(write_queue, PL330State, 0, vmstate_pl330_queue,
+ PL330Queue),
+ VMSTATE_TIMER_PTR(timer, PL330State),
+ VMSTATE_UINT32(inten, PL330State),
+ VMSTATE_UINT32(int_status, PL330State),
+ VMSTATE_UINT32(ev_status, PL330State),
+ VMSTATE_UINT32_ARRAY(dbg, PL330State, 2),
+ VMSTATE_UINT8(debug_status, PL330State),
+ VMSTATE_UINT8(num_faulting, PL330State),
+ VMSTATE_UINT8_ARRAY(periph_busy, PL330State, PL330_PERIPH_NUM),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+typedef struct PL330InsnDesc {
+ /* OPCODE of the instruction */
+ uint8_t opcode;
+ /* Mask so we can select several sibling instructions, such as
+ DMALD, DMALDS and DMALDB */
+ uint8_t opmask;
+ /* Size of instruction in bytes */
+ uint8_t size;
+ /* Interpreter */
+ void (*exec)(PL330Chan *, uint8_t opcode, uint8_t *args, int len);
+} PL330InsnDesc;
+
+static void pl330_hexdump(uint8_t *buf, size_t size)
+{
+ unsigned int b, i, len;
+ char tmpbuf[80];
+
+ for (b = 0; b < size; b += 16) {
+ len = size - b;
+ if (len > 16) {
+ len = 16;
+ }
+ tmpbuf[0] = '\0';
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0) {
+ strcat(tmpbuf, " ");
+ }
+ sprintf(tmpbuf + strlen(tmpbuf), " %02x", buf[b + i]);
+ }
+ trace_pl330_hexdump(b, tmpbuf);
+ }
+}
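+
+/*
+ * For illustration: each trace line covers up to 16 bytes, formatted in
+ * groups of four, e.g.
+ *   "  00 01 02 03  04 05 06 07  08 09 0a 0b  0c 0d 0e 0f"
+ */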
+
+/* MFIFO Implementation
+ *
+ * MFIFO is implemented as a cyclic buffer of BUF_SIZE bytes. Tagged bytes are
+ * stored in this buffer: data goes in the BUF field and each byte's tag in
+ * the corresponding array element of the TAG field.
+ */
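+
+/*
+ * A worked example (illustrative only): pushing two bytes with tag 0
+ * and then one byte with tag 1 into an empty MFIFO leaves
+ * BUF = { d0 d1 d2 }, TAG = { 0 0 1 }, head = 0, num = 3. A subsequent
+ * pl330_fifo_get() of two bytes with tag 0 succeeds and advances head
+ * to 2, whereas a get with tag 1 issued first would hit the
+ * tag-mismatch path and return PL330_FIFO_ERR.
+ */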
+
+/* Initialize queue. */
+
+static void pl330_fifo_init(PL330Fifo *s, uint32_t size)
+{
+ s->buf = g_malloc0(size);
+ s->tag = g_malloc0(size);
+ s->buf_size = size;
+}
+
+/* Cyclic increment */
+
+static inline int pl330_fifo_inc(PL330Fifo *s, int x)
+{
+ return (x + 1) % s->buf_size;
+}
+
+/* Number of empty bytes in MFIFO */
+
+static inline int pl330_fifo_num_free(PL330Fifo *s)
+{
+ return s->buf_size - s->num;
+}
+
+/* Push LEN bytes of data stored in BUF to MFIFO and tag them with TAG.
+ * Returns zero on success, or PL330_FIFO_STALL if there is not enough free
+ * space in MFIFO to store the requested amount of data. If the push was
+ * unsuccessful no data is stored to MFIFO.
+ */
+
+static int pl330_fifo_push(PL330Fifo *s, uint8_t *buf, int len, uint8_t tag)
+{
+ int i;
+
+ if (s->buf_size - s->num < len) {
+ return PL330_FIFO_STALL;
+ }
+ for (i = 0; i < len; i++) {
+ int push_idx = (s->head + s->num + i) % s->buf_size;
+ s->buf[push_idx] = buf[i];
+ s->tag[push_idx] = tag;
+ }
+ s->num += len;
+ return PL330_FIFO_OK;
+}
+
+/* Get LEN bytes of data from MFIFO and store them to BUF. The tag value of
+ * each byte is verified. Returns zero on success, PL330_FIFO_ERR on tag
+ * mismatch and PL330_FIFO_STALL if there is not enough data in MFIFO. If the
+ * get was unsuccessful no data is removed from MFIFO.
+ */
+
+static int pl330_fifo_get(PL330Fifo *s, uint8_t *buf, int len, uint8_t tag)
+{
+ int i;
+
+ if (s->num < len) {
+ return PL330_FIFO_STALL;
+ }
+ for (i = 0; i < len; i++) {
+        int get_idx = (s->head + i) % s->buf_size;
+        if (s->tag[get_idx] == tag) {
+            buf[i] = s->buf[get_idx];
+ } else { /* Tag mismatch - Rollback transaction */
+ return PL330_FIFO_ERR;
+ }
+ }
+ s->head = (s->head + len) % s->buf_size;
+ s->num -= len;
+ return PL330_FIFO_OK;
+}
+
+/* Reset MFIFO. This completely erases all data in it. */
+
+static inline void pl330_fifo_reset(PL330Fifo *s)
+{
+ s->head = 0;
+ s->num = 0;
+}
+
+/* Return tag of the first byte stored in MFIFO. If MFIFO is empty
+ * PL330_UNTAGGED is returned.
+ */
+
+static inline uint8_t pl330_fifo_tag(PL330Fifo *s)
+{
+ return (!s->num) ? PL330_UNTAGGED : s->tag[s->head];
+}
+
+/* Returns non-zero if tag TAG is present in fifo or zero otherwise */
+
+static int pl330_fifo_has_tag(PL330Fifo *s, uint8_t tag)
+{
+ int i, n;
+
+ i = s->head;
+ for (n = 0; n < s->num; n++) {
+ if (s->tag[i] == tag) {
+ return 1;
+ }
+ i = pl330_fifo_inc(s, i);
+ }
+ return 0;
+}
+
+/* Remove all entries tagged with TAG from MFIFO */
+
+static void pl330_fifo_tagged_remove(PL330Fifo *s, uint8_t tag)
+{
+ int i, t, n;
+
+ t = i = s->head;
+ for (n = 0; n < s->num; n++) {
+ if (s->tag[i] != tag) {
+ s->buf[t] = s->buf[i];
+ s->tag[t] = s->tag[i];
+ t = pl330_fifo_inc(s, t);
+ } else {
+ s->num = s->num - 1;
+ }
+ i = pl330_fifo_inc(s, i);
+ }
+}
+
+/* Read-Write Queue implementation
+ *
+ * A Read-Write Queue stores up to QUEUE_SIZE instructions (loads or stores).
+ * Each instruction is described by source (for loads) or destination (for
+ * stores) address ADDR, width of data to be loaded/stored LEN, number of
+ * stores/loads to be performed N, INC bit, Z bit and TAG to identify channel
+ * this instruction belongs to. The queue does not store any information about
+ * the nature of the instruction (load or store). PL330 has different queues
+ * for loads and stores so this is already known at the top level where it
+ * matters.
+ *
+ * Queue works as FIFO for instructions with equivalent tags, but can issue
+ * instructions with different tags in arbitrary order. SEQN field attached to
+ * each instruction helps to achieve this. For each TAG queue contains
+ * instructions with consecutive SEQN values ranging from LO_SEQN[TAG] to
+ * HI_SEQN[TAG]-1 inclusive. SEQN is 8-bit unsigned integer, so SEQN=255 is
+ * followed by SEQN=0.
+ *
+ * Z bit indicates that zeroes should be stored. No MFIFO fetches are performed
+ * in this case.
+ */
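+
+/*
+ * For example (illustrative): with loads queued for tag 0 at SEQN 5 and
+ * SEQN 6 (LO_SEQN[0] == 5) and for tag 1 at SEQN 3 (LO_SEQN[1] == 3),
+ * a lookup with enforce_seq set may issue tag 0/SEQN 5 and tag 1/SEQN 3
+ * in either order, but tag 0/SEQN 6 only after SEQN 5 has been removed
+ * and LO_SEQN[0] has advanced to 6.
+ */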
+
+static void pl330_queue_reset(PL330Queue *s)
+{
+ int i;
+
+ for (i = 0; i < s->queue_size; i++) {
+ s->queue[i].tag = PL330_UNTAGGED;
+ }
+}
+
+/* Initialize queue */
+static void pl330_queue_init(PL330Queue *s, int size, PL330State *parent)
+{
+ s->parent = parent;
+ s->queue = g_new0(PL330QueueEntry, size);
+ s->queue_size = size;
+}
+
+/* Returns pointer to an empty slot or NULL if queue is full */
+static PL330QueueEntry *pl330_queue_find_empty(PL330Queue *s)
+{
+ int i;
+
+ for (i = 0; i < s->queue_size; i++) {
+ if (s->queue[i].tag == PL330_UNTAGGED) {
+ return &s->queue[i];
+ }
+ }
+ return NULL;
+}
+
+/* Put instruction in queue.
+ * Return value:
+ * - zero - OK
+ * - non-zero - queue is full
+ */
+
+static int pl330_queue_put_insn(PL330Queue *s, uint32_t addr,
+ int len, int n, bool inc, bool z, uint8_t tag)
+{
+ PL330QueueEntry *entry = pl330_queue_find_empty(s);
+
+ if (!entry) {
+ return 1;
+ }
+ entry->tag = tag;
+ entry->addr = addr;
+ entry->len = len;
+ entry->n = n;
+ entry->z = z;
+ entry->inc = inc;
+ entry->seqn = s->parent->hi_seqn[tag];
+ s->parent->hi_seqn[tag]++;
+ return 0;
+}
+
+/* Returns a pointer to the queue slot containing an instruction which
+ * satisfies the following conditions:
+ * - it has valid tag value (not PL330_UNTAGGED)
+ * - if enforce_seq is set it has to be issuable without violating queue
+ * logic (see above)
+ * - if TAG argument is not PL330_UNTAGGED this instruction has tag value
+ * equivalent to the argument TAG value.
+ * If such instruction cannot be found NULL is returned.
+ */
+
+static PL330QueueEntry *pl330_queue_find_insn(PL330Queue *s, uint8_t tag,
+ bool enforce_seq)
+{
+ int i;
+
+ for (i = 0; i < s->queue_size; i++) {
+ if (s->queue[i].tag != PL330_UNTAGGED) {
+ if ((!enforce_seq ||
+ s->queue[i].seqn == s->parent->lo_seqn[s->queue[i].tag]) &&
+ (s->queue[i].tag == tag || tag == PL330_UNTAGGED ||
+ s->queue[i].z)) {
+ return &s->queue[i];
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Removes instruction from queue. */
+
+static inline void pl330_queue_remove_insn(PL330Queue *s, PL330QueueEntry *e)
+{
+ s->parent->lo_seqn[e->tag]++;
+ e->tag = PL330_UNTAGGED;
+}
+
+/* Removes all instructions tagged with TAG from queue. */
+
+static inline void pl330_queue_remove_tagged(PL330Queue *s, uint8_t tag)
+{
+ int i;
+
+ for (i = 0; i < s->queue_size; i++) {
+ if (s->queue[i].tag == tag) {
+ s->queue[i].tag = PL330_UNTAGGED;
+ }
+ }
+}
+
+/* DMA instruction execution engine */
+
+/* Moves DMA channel to the FAULT state and updates its status. */
+
+static inline void pl330_fault(PL330Chan *ch, uint32_t flags)
+{
+ trace_pl330_fault(ch, flags);
+ ch->fault_type |= flags;
+ if (ch->state == pl330_chan_fault) {
+ return;
+ }
+ ch->state = pl330_chan_fault;
+ ch->parent->num_faulting++;
+ if (ch->parent->num_faulting == 1) {
+ trace_pl330_fault_abort();
+ qemu_irq_raise(ch->parent->irq_abort);
+ }
+}
+
+/*
+ * For information about instructions see PL330 Technical Reference Manual.
+ *
+ * Arguments:
+ * CH - channel executing the instruction
+ * OPCODE - opcode
+ * ARGS - array of 8-bit arguments
+ * LEN - number of elements in ARGS array
+ */
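+
+/*
+ * Encoding example (illustrative, derived from the handlers below): the
+ * 6-byte instruction DMAMOV SAR, 0x40001000 arrives as opcode 0xBC with
+ * args[0] = 0x00 (rd = SAR) and args[1..4] = 00 10 00 40, the 32-bit
+ * immediate in little-endian order, which pl330_dmamov() assembles back
+ * into ch->src = 0x40001000.
+ */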
+
+static void pl330_dmaadxh(PL330Chan *ch, uint8_t *args, bool ra, bool neg)
+{
+ uint32_t im = (args[1] << 8) | args[0];
+ if (neg) {
+ im |= 0xffffu << 16;
+ }
+
+ if (ch->is_manager) {
+ pl330_fault(ch, PL330_FAULT_UNDEF_INSTR);
+ return;
+ }
+ if (ra) {
+ ch->dst += im;
+ } else {
+ ch->src += im;
+ }
+}
+
+static void pl330_dmaaddh(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ pl330_dmaadxh(ch, args, extract32(opcode, 1, 1), false);
+}
+
+static void pl330_dmaadnh(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ pl330_dmaadxh(ch, args, extract32(opcode, 1, 1), true);
+}
+
+static void pl330_dmaend(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ PL330State *s = ch->parent;
+
+ if (ch->state == pl330_chan_executing && !ch->is_manager) {
+ /* Wait for all transfers to complete */
+ if (pl330_fifo_has_tag(&s->fifo, ch->tag) ||
+ pl330_queue_find_insn(&s->read_queue, ch->tag, false) != NULL ||
+ pl330_queue_find_insn(&s->write_queue, ch->tag, false) != NULL) {
+
+ ch->stall = 1;
+ return;
+ }
+ }
+ trace_pl330_dmaend();
+ pl330_fifo_tagged_remove(&s->fifo, ch->tag);
+ pl330_queue_remove_tagged(&s->read_queue, ch->tag);
+ pl330_queue_remove_tagged(&s->write_queue, ch->tag);
+ ch->state = pl330_chan_stopped;
+}
+
+static void pl330_dmaflushp(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ uint8_t periph_id;
+
+ if (args[0] & 7) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ periph_id = (args[0] >> 3) & 0x1f;
+ if (periph_id >= ch->parent->num_periph_req) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+ pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+ return;
+ }
+ /* Do nothing */
+}
+
+static void pl330_dmago(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t chan_id;
+ uint8_t ns;
+ uint32_t pc;
+ PL330Chan *s;
+
+ trace_pl330_dmago();
+
+ if (!ch->is_manager) {
+ pl330_fault(ch, PL330_FAULT_UNDEF_INSTR);
+ return;
+ }
+ ns = !!(opcode & 2);
+ chan_id = args[0] & 7;
+ if ((args[0] >> 3)) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (chan_id >= ch->parent->num_chnls) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ pc = (((uint32_t)args[4]) << 24) | (((uint32_t)args[3]) << 16) |
+ (((uint32_t)args[2]) << 8) | (((uint32_t)args[1]));
+ if (ch->parent->chan[chan_id].state != pl330_chan_stopped) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !ns) {
+ pl330_fault(ch, PL330_FAULT_DMAGO_ERR);
+ return;
+ }
+ s = &ch->parent->chan[chan_id];
+ s->ns = ns;
+ s->pc = pc;
+ s->state = pl330_chan_executing;
+}
+
+static void pl330_dmald(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t bs = opcode & 3;
+ uint32_t size, num;
+ bool inc;
+
+ if (bs == 2) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if ((bs == 1 && ch->request_flag == PL330_BURST) ||
+ (bs == 3 && ch->request_flag == PL330_SINGLE)) {
+ /* Perform NOP */
+ return;
+ }
+ if (bs == 1 && ch->request_flag == PL330_SINGLE) {
+ num = 1;
+ } else {
+ num = ((ch->control >> 4) & 0xf) + 1;
+ }
+ size = (uint32_t)1 << ((ch->control >> 1) & 0x7);
+ inc = !!(ch->control & 1);
+ ch->stall = pl330_queue_put_insn(&ch->parent->read_queue, ch->src,
+ size, num, inc, 0, ch->tag);
+ if (!ch->stall) {
+ trace_pl330_dmald(ch->tag, ch->src, size, num, inc ? 'Y' : 'N');
+ ch->src += inc ? size * num - (ch->src & (size - 1)) : 0;
+ }
+}
+
+static void pl330_dmaldp(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t periph_id;
+
+ if (args[0] & 7) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ periph_id = (args[0] >> 3) & 0x1f;
+ if (periph_id >= ch->parent->num_periph_req) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+ pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+ return;
+ }
+ pl330_dmald(ch, opcode, args, len);
+}
+
+static void pl330_dmalp(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t lc = (opcode & 2) >> 1;
+
+ ch->lc[lc] = args[0];
+}
+
+static void pl330_dmakill(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ if (ch->state == pl330_chan_fault ||
+ ch->state == pl330_chan_fault_completing) {
+ /* This is the only way for a channel to leave the faulting state */
+ ch->fault_type = 0;
+ ch->parent->num_faulting--;
+ if (ch->parent->num_faulting == 0) {
+ trace_pl330_dmakill();
+ qemu_irq_lower(ch->parent->irq_abort);
+ }
+ }
+ ch->state = pl330_chan_killing;
+ pl330_fifo_tagged_remove(&ch->parent->fifo, ch->tag);
+ pl330_queue_remove_tagged(&ch->parent->read_queue, ch->tag);
+ pl330_queue_remove_tagged(&ch->parent->write_queue, ch->tag);
+ ch->state = pl330_chan_stopped;
+}
+
+static void pl330_dmalpend(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ uint8_t nf = (opcode & 0x10) >> 4;
+ uint8_t bs = opcode & 3;
+ uint8_t lc = (opcode & 4) >> 2;
+
+ trace_pl330_dmalpend(nf, bs, lc, ch->lc[lc], ch->request_flag);
+
+ if (bs == 2) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if ((bs == 1 && ch->request_flag == PL330_BURST) ||
+ (bs == 3 && ch->request_flag == PL330_SINGLE)) {
+ /* Perform NOP */
+ return;
+ }
+ if (!nf || ch->lc[lc]) {
+ if (nf) {
+ ch->lc[lc]--;
+ }
+ trace_pl330_dmalpiter();
+ ch->pc -= args[0];
+ ch->pc -= len + 1;
+ /* "ch->pc -= args[0] + len + 1" is incorrect when args[0] == 256 */
+ } else {
+ trace_pl330_dmalpfallthrough();
+ }
+}
+
+
+static void pl330_dmamov(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t rd = args[0] & 7;
+ uint32_t im;
+
+ if ((args[0] >> 3)) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ im = (((uint32_t)args[4]) << 24) | (((uint32_t)args[3]) << 16) |
+ (((uint32_t)args[2]) << 8) | (((uint32_t)args[1]));
+ switch (rd) {
+ case 0:
+ ch->src = im;
+ break;
+ case 1:
+ ch->control = im;
+ break;
+ case 2:
+ ch->dst = im;
+ break;
+ default:
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+}
+
+static void pl330_dmanop(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ /* NOP is NOP. */
+}
+
+static void pl330_dmarmb(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ if (pl330_queue_find_insn(&ch->parent->read_queue, ch->tag, false)) {
+ ch->state = pl330_chan_at_barrier;
+ ch->stall = 1;
+ return;
+ } else {
+ ch->state = pl330_chan_executing;
+ }
+}
+
+static void pl330_dmasev(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t ev_id;
+
+ if (args[0] & 7) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ ev_id = (args[0] >> 3) & 0x1f;
+ if (ev_id >= ch->parent->num_events) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !(ch->parent->cfg[CFG_INS] & (1 << ev_id))) {
+ pl330_fault(ch, PL330_FAULT_EVENT_ERR);
+ return;
+ }
+ if (ch->parent->inten & (1 << ev_id)) {
+ ch->parent->int_status |= (1 << ev_id);
+ trace_pl330_dmasev_evirq(ev_id);
+ qemu_irq_raise(ch->parent->irq[ev_id]);
+ }
+ trace_pl330_dmasev_event(ev_id);
+ ch->parent->ev_status |= (1 << ev_id);
+}
+
+static void pl330_dmast(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len)
+{
+ uint8_t bs = opcode & 3;
+ uint32_t size, num;
+ bool inc;
+
+ if (bs == 2) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if ((bs == 1 && ch->request_flag == PL330_BURST) ||
+ (bs == 3 && ch->request_flag == PL330_SINGLE)) {
+ /* Perform NOP */
+ return;
+ }
+ num = ((ch->control >> 18) & 0xf) + 1;
+ size = (uint32_t)1 << ((ch->control >> 15) & 0x7);
+ inc = !!((ch->control >> 14) & 1);
+ ch->stall = pl330_queue_put_insn(&ch->parent->write_queue, ch->dst,
+ size, num, inc, 0, ch->tag);
+ if (!ch->stall) {
+ trace_pl330_dmast(ch->tag, ch->dst, size, num, inc ? 'Y' : 'N');
+ ch->dst += inc ? size * num - (ch->dst & (size - 1)) : 0;
+ }
+}
+
+static void pl330_dmastp(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ uint8_t periph_id;
+
+ if (args[0] & 7) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ periph_id = (args[0] >> 3) & 0x1f;
+ if (periph_id >= ch->parent->num_periph_req) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+ pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+ return;
+ }
+ pl330_dmast(ch, opcode, args, len);
+}
+
+static void pl330_dmastz(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ uint32_t size, num;
+ bool inc;
+
+ num = ((ch->control >> 18) & 0xf) + 1;
+ size = (uint32_t)1 << ((ch->control >> 15) & 0x7);
+ inc = !!((ch->control >> 14) & 1);
+ ch->stall = pl330_queue_put_insn(&ch->parent->write_queue, ch->dst,
+ size, num, inc, 1, ch->tag);
+ if (inc) {
+ ch->dst += size * num;
+ }
+}
+
+static void pl330_dmawfe(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ uint8_t ev_id;
+ int i;
+
+ if (args[0] & 5) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ ev_id = (args[0] >> 3) & 0x1f;
+ if (ev_id >= ch->parent->num_events) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !(ch->parent->cfg[CFG_INS] & (1 << ev_id))) {
+ pl330_fault(ch, PL330_FAULT_EVENT_ERR);
+ return;
+ }
+ ch->wakeup = ev_id;
+ ch->state = pl330_chan_waiting_event;
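+    /* Consume the event here only if its interrupt is not routed
+     * through INTEN; INTEN-routed events are cleared by a write to
+     * INTCLR instead.
+     */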
+ if (~ch->parent->inten & ch->parent->ev_status & 1 << ev_id) {
+ ch->state = pl330_chan_executing;
+        /* If anyone else is currently waiting on the same event, leave
+         * ev_status set so that they pick up the event as well.
+         */
+ for (i = 0; i < ch->parent->num_chnls; ++i) {
+ PL330Chan *peer = &ch->parent->chan[i];
+ if (peer->state == pl330_chan_waiting_event &&
+ peer->wakeup == ev_id) {
+ return;
+ }
+ }
+ ch->parent->ev_status &= ~(1 << ev_id);
+ trace_pl330_dmawfe(ev_id);
+ } else {
+ ch->stall = 1;
+ }
+}
+
+static void pl330_dmawfp(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ uint8_t bs = opcode & 3;
+ uint8_t periph_id;
+
+ if (args[0] & 7) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ periph_id = (args[0] >> 3) & 0x1f;
+ if (periph_id >= ch->parent->num_periph_req) {
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+ if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) {
+ pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR);
+ return;
+ }
+ switch (bs) {
+ case 0: /* S */
+ ch->request_flag = PL330_SINGLE;
+ ch->wfp_sbp = 0;
+ break;
+ case 1: /* P */
+ ch->request_flag = PL330_BURST;
+ ch->wfp_sbp = 2;
+ break;
+ case 2: /* B */
+ ch->request_flag = PL330_BURST;
+ ch->wfp_sbp = 1;
+ break;
+ default:
+ pl330_fault(ch, PL330_FAULT_OPERAND_INVALID);
+ return;
+ }
+
+ if (ch->parent->periph_busy[periph_id]) {
+ ch->state = pl330_chan_waiting_periph;
+ ch->stall = 1;
+ } else if (ch->state == pl330_chan_waiting_periph) {
+ ch->state = pl330_chan_executing;
+ }
+}
+
+static void pl330_dmawmb(PL330Chan *ch, uint8_t opcode,
+ uint8_t *args, int len)
+{
+ if (pl330_queue_find_insn(&ch->parent->write_queue, ch->tag, false)) {
+ ch->state = pl330_chan_at_barrier;
+ ch->stall = 1;
+ return;
+ } else {
+ ch->state = pl330_chan_executing;
+ }
+}
+
+/* NULL terminated array of the instruction descriptions. */
+static const PL330InsnDesc insn_desc[] = {
+ { .opcode = 0x54, .opmask = 0xFD, .size = 3, .exec = pl330_dmaaddh, },
+ { .opcode = 0x5c, .opmask = 0xFD, .size = 3, .exec = pl330_dmaadnh, },
+ { .opcode = 0x00, .opmask = 0xFF, .size = 1, .exec = pl330_dmaend, },
+ { .opcode = 0x35, .opmask = 0xFF, .size = 2, .exec = pl330_dmaflushp, },
+ { .opcode = 0xA0, .opmask = 0xFD, .size = 6, .exec = pl330_dmago, },
+ { .opcode = 0x04, .opmask = 0xFC, .size = 1, .exec = pl330_dmald, },
+ { .opcode = 0x25, .opmask = 0xFD, .size = 2, .exec = pl330_dmaldp, },
+ { .opcode = 0x20, .opmask = 0xFD, .size = 2, .exec = pl330_dmalp, },
+    /* dmastp must come before dmalpend in this list because their
+     * opcode/mask patterns overlap.
+     */
+ { .opcode = 0x29, .opmask = 0xFD, .size = 2, .exec = pl330_dmastp, },
+ { .opcode = 0x28, .opmask = 0xE8, .size = 2, .exec = pl330_dmalpend, },
+ { .opcode = 0x01, .opmask = 0xFF, .size = 1, .exec = pl330_dmakill, },
+ { .opcode = 0xBC, .opmask = 0xFF, .size = 6, .exec = pl330_dmamov, },
+ { .opcode = 0x18, .opmask = 0xFF, .size = 1, .exec = pl330_dmanop, },
+ { .opcode = 0x12, .opmask = 0xFF, .size = 1, .exec = pl330_dmarmb, },
+ { .opcode = 0x34, .opmask = 0xFF, .size = 2, .exec = pl330_dmasev, },
+ { .opcode = 0x08, .opmask = 0xFC, .size = 1, .exec = pl330_dmast, },
+ { .opcode = 0x0C, .opmask = 0xFF, .size = 1, .exec = pl330_dmastz, },
+ { .opcode = 0x36, .opmask = 0xFF, .size = 2, .exec = pl330_dmawfe, },
+ { .opcode = 0x30, .opmask = 0xFC, .size = 2, .exec = pl330_dmawfp, },
+ { .opcode = 0x13, .opmask = 0xFF, .size = 1, .exec = pl330_dmawmb, },
+ { .opcode = 0x00, .opmask = 0x00, .size = 0, .exec = NULL, }
+};
+
+/* Instructions which can be issued via debug registers. */
+static const PL330InsnDesc debug_insn_desc[] = {
+ { .opcode = 0xA0, .opmask = 0xFD, .size = 6, .exec = pl330_dmago, },
+ { .opcode = 0x01, .opmask = 0xFF, .size = 1, .exec = pl330_dmakill, },
+ { .opcode = 0x34, .opmask = 0xFF, .size = 2, .exec = pl330_dmasev, },
+ { .opcode = 0x00, .opmask = 0x00, .size = 0, .exec = NULL, }
+};
+
+static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch)
+{
+ uint8_t opcode;
+ int i;
+
+ dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1);
+ for (i = 0; insn_desc[i].size; i++) {
+ if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) {
+ return &insn_desc[i];
+ }
+ }
+ return NULL;
+}
+
+static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn)
+{
+ uint8_t buf[PL330_INSN_MAXSIZE];
+
+ assert(insn->size <= PL330_INSN_MAXSIZE);
+ dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size);
+ insn->exec(ch, buf[0], &buf[1], insn->size - 1);
+}
+
+static inline void pl330_update_pc(PL330Chan *ch,
+ const PL330InsnDesc *insn)
+{
+ ch->pc += insn->size;
+}
+
+/* Try to execute the current instruction in channel CH. Returns the number
+   of executed instructions (0 or 1). */
+static int pl330_chan_exec(PL330Chan *ch)
+{
+ const PL330InsnDesc *insn;
+
+ if (ch->state != pl330_chan_executing &&
+ ch->state != pl330_chan_waiting_periph &&
+ ch->state != pl330_chan_at_barrier &&
+ ch->state != pl330_chan_waiting_event) {
+ return 0;
+ }
+ ch->stall = 0;
+ insn = pl330_fetch_insn(ch);
+ if (!insn) {
+ trace_pl330_chan_exec_undef();
+ pl330_fault(ch, PL330_FAULT_UNDEF_INSTR);
+ return 0;
+ }
+ pl330_exec_insn(ch, insn);
+ if (!ch->stall) {
+ pl330_update_pc(ch, insn);
+ ch->watchdog_timer = 0;
+ return 1;
+    } else if (ch->state == pl330_chan_executing) {
+        /* The watchdog only runs while the channel is executing */
+ ch->watchdog_timer++;
+ if (ch->watchdog_timer >= PL330_WATCHDOG_LIMIT) {
+ pl330_fault(ch, PL330_FAULT_LOCKUP_ERR);
+ }
+ }
+ return 0;
+}
+
+/* Try to execute one instruction in the given channel, plus one instruction
+   from the read queue and one from the write queue. Returns the number of
+   successfully executed instructions. */
+static int pl330_exec_cycle(PL330Chan *channel)
+{
+ PL330State *s = channel->parent;
+ PL330QueueEntry *q;
+ int i;
+ int num_exec = 0;
+ int fifo_res = 0;
+ uint8_t buf[PL330_MAX_BURST_LEN];
+
+ /* Execute one instruction in each channel */
+ num_exec += pl330_chan_exec(channel);
+
+ /* Execute one instruction from read queue */
+ q = pl330_queue_find_insn(&s->read_queue, PL330_UNTAGGED, true);
+ if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) {
+ int len = q->len - (q->addr & (q->len - 1));
+
+ dma_memory_read(s->mem_as, q->addr, buf, len);
+ trace_pl330_exec_cycle(q->addr, len);
+ if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) {
+ pl330_hexdump(buf, len);
+ }
+ fifo_res = pl330_fifo_push(&s->fifo, buf, len, q->tag);
+ if (fifo_res == PL330_FIFO_OK) {
+ if (q->inc) {
+ q->addr += len;
+ }
+ q->n--;
+ if (!q->n) {
+ pl330_queue_remove_insn(&s->read_queue, q);
+ }
+ num_exec++;
+ }
+ }
+
+ /* Execute one instruction from write queue. */
+ q = pl330_queue_find_insn(&s->write_queue, pl330_fifo_tag(&s->fifo), true);
+ if (q != NULL) {
+ int len = q->len - (q->addr & (q->len - 1));
+
+ if (q->z) {
+ for (i = 0; i < len; i++) {
+ buf[i] = 0;
+ }
+ } else {
+ fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag);
+ }
+ if (fifo_res == PL330_FIFO_OK || q->z) {
+ dma_memory_write(s->mem_as, q->addr, buf, len);
+ trace_pl330_exec_cycle(q->addr, len);
+ if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) {
+ pl330_hexdump(buf, len);
+ }
+ if (q->inc) {
+ q->addr += len;
+ }
+ num_exec++;
+ } else if (fifo_res == PL330_FIFO_STALL) {
+ pl330_fault(&channel->parent->chan[q->tag],
+ PL330_FAULT_FIFOEMPTY_ERR);
+ }
+ q->n--;
+ if (!q->n) {
+ pl330_queue_remove_insn(&s->write_queue, q);
+ }
+ }
+
+ return num_exec;
+}
+
+static int pl330_exec_channel(PL330Chan *channel)
+{
+ int insr_exec = 0;
+
+ /* TODO: Is it all right to execute everything or should we do per-cycle
+ simulation? */
+ while (pl330_exec_cycle(channel)) {
+ insr_exec++;
+ }
+
+ /* Detect deadlock */
+ if (channel->state == pl330_chan_executing) {
+ pl330_fault(channel, PL330_FAULT_LOCKUP_ERR);
+ }
+ /* Situation when one of the queues has deadlocked but all channels
+ * have finished their programs should be impossible.
+ */
+
+ return insr_exec;
+}
+
+static inline void pl330_exec(PL330State *s)
+{
+ int i, insr_exec;
+ trace_pl330_exec();
+ do {
+ insr_exec = pl330_exec_channel(&s->manager);
+
+ for (i = 0; i < s->num_chnls; i++) {
+ insr_exec += pl330_exec_channel(&s->chan[i]);
+ }
+ } while (insr_exec);
+}
+
+static void pl330_exec_cycle_timer(void *opaque)
+{
+ PL330State *s = (PL330State *)opaque;
+ pl330_exec(s);
+}
+
+/* Stop or restore dma operations */
+
+static void pl330_dma_stop_irq(void *opaque, int irq, int level)
+{
+ PL330State *s = (PL330State *)opaque;
+
+ if (s->periph_busy[irq] != level) {
+ s->periph_busy[irq] = level;
+ timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+ }
+}
+
+static void pl330_debug_exec(PL330State *s)
+{
+ uint8_t args[5];
+ uint8_t opcode;
+ uint8_t chan_id;
+ int i;
+ PL330Chan *ch;
+ const PL330InsnDesc *insn;
+
+ s->debug_status = 1;
+ chan_id = (s->dbg[0] >> 8) & 0x07;
+ opcode = (s->dbg[0] >> 16) & 0xff;
+ args[0] = (s->dbg[0] >> 24) & 0xff;
+ args[1] = (s->dbg[1] >> 0) & 0xff;
+ args[2] = (s->dbg[1] >> 8) & 0xff;
+ args[3] = (s->dbg[1] >> 16) & 0xff;
+ args[4] = (s->dbg[1] >> 24) & 0xff;
+ trace_pl330_debug_exec(chan_id);
+ if (s->dbg[0] & 1) {
+ ch = &s->chan[chan_id];
+ } else {
+ ch = &s->manager;
+ }
+ insn = NULL;
+ for (i = 0; debug_insn_desc[i].size; i++) {
+ if ((opcode & debug_insn_desc[i].opmask) == debug_insn_desc[i].opcode) {
+ insn = &debug_insn_desc[i];
+ }
+ }
+ if (!insn) {
+ pl330_fault(ch, PL330_FAULT_UNDEF_INSTR | PL330_FAULT_DBG_INSTR);
+        return;
+ }
+ ch->stall = 0;
+ insn->exec(ch, opcode, args, insn->size - 1);
+ if (ch->fault_type) {
+ ch->fault_type |= PL330_FAULT_DBG_INSTR;
+ }
+ if (ch->stall) {
+ trace_pl330_debug_exec_stall();
+ qemu_log_mask(LOG_UNIMP, "pl330: stall of debug instruction not "
+ "implemented\n");
+ }
+ s->debug_status = 0;
+}
+
+/* IOMEM mapped registers */
+
+static void pl330_iomem_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PL330State *s = (PL330State *) opaque;
+ int i;
+
+ trace_pl330_iomem_write((unsigned)offset, (unsigned)value);
+
+ switch (offset) {
+ case PL330_REG_INTEN:
+ s->inten = value;
+ break;
+ case PL330_REG_INTCLR:
+ for (i = 0; i < s->num_events; i++) {
+ if (s->int_status & s->inten & value & (1 << i)) {
+ trace_pl330_iomem_write_clr(i);
+ qemu_irq_lower(s->irq[i]);
+ }
+ }
+ s->ev_status &= ~(value & s->inten);
+ s->int_status &= ~(value & s->inten);
+ break;
+ case PL330_REG_DBGCMD:
+ if ((value & 3) == 0) {
+ pl330_debug_exec(s);
+ pl330_exec(s);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: write of illegal value %u "
+ "for offset " TARGET_FMT_plx "\n", (unsigned)value,
+ offset);
+ }
+ break;
+ case PL330_REG_DBGINST0:
+ s->dbg[0] = value;
+ break;
+ case PL330_REG_DBGINST1:
+ s->dbg[1] = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad write offset " TARGET_FMT_plx
+ "\n", offset);
+ break;
+ }
+}
+
+static inline uint32_t pl330_iomem_read_imp(void *opaque,
+ hwaddr offset)
+{
+ PL330State *s = (PL330State *)opaque;
+ int chan_id;
+ int i;
+ uint32_t res;
+
+ if (offset >= PL330_REG_PERIPH_ID && offset < PL330_REG_PERIPH_ID + 32) {
+ return pl330_id[(offset - PL330_REG_PERIPH_ID) >> 2];
+ }
+ if (offset >= PL330_REG_CR0_BASE && offset < PL330_REG_CR0_BASE + 24) {
+ return s->cfg[(offset - PL330_REG_CR0_BASE) >> 2];
+ }
+ if (offset >= PL330_REG_CHANCTRL && offset < PL330_REG_DBGSTATUS) {
+ offset -= PL330_REG_CHANCTRL;
+ chan_id = offset >> 5;
+ if (chan_id >= s->num_chnls) {
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+ TARGET_FMT_plx "\n", offset);
+ return 0;
+ }
+ switch (offset & 0x1f) {
+ case 0x00:
+ return s->chan[chan_id].src;
+ case 0x04:
+ return s->chan[chan_id].dst;
+ case 0x08:
+ return s->chan[chan_id].control;
+ case 0x0C:
+ return s->chan[chan_id].lc[0];
+ case 0x10:
+ return s->chan[chan_id].lc[1];
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+ TARGET_FMT_plx "\n", offset);
+ return 0;
+ }
+ }
+ if (offset >= PL330_REG_CSR_BASE && offset < 0x400) {
+ offset -= PL330_REG_CSR_BASE;
+ chan_id = offset >> 3;
+ if (chan_id >= s->num_chnls) {
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+ TARGET_FMT_plx "\n", offset);
+ return 0;
+ }
+ switch ((offset >> 2) & 1) {
+ case 0x0:
+ res = (s->chan[chan_id].ns << 21) |
+ (s->chan[chan_id].wakeup << 4) |
+ (s->chan[chan_id].state) |
+ (s->chan[chan_id].wfp_sbp << 14);
+ return res;
+ case 0x1:
+ return s->chan[chan_id].pc;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: read error\n");
+ return 0;
+ }
+ }
+ if (offset >= PL330_REG_FTR_BASE && offset < 0x100) {
+ offset -= PL330_REG_FTR_BASE;
+ chan_id = offset >> 2;
+ if (chan_id >= s->num_chnls) {
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+ TARGET_FMT_plx "\n", offset);
+ return 0;
+ }
+ return s->chan[chan_id].fault_type;
+ }
+ switch (offset) {
+ case PL330_REG_DSR:
+ return (s->manager.ns << 9) | (s->manager.wakeup << 4) |
+ (s->manager.state & 0xf);
+ case PL330_REG_DPC:
+ return s->manager.pc;
+ case PL330_REG_INTEN:
+ return s->inten;
+ case PL330_REG_INT_EVENT_RIS:
+ return s->ev_status;
+ case PL330_REG_INTMIS:
+ return s->int_status;
+ case PL330_REG_INTCLR:
+        /* The documentation says this register cannot be read,
+         * but the Linux kernel reads it anyway.
+         */
+ return 0;
+ case PL330_REG_FSRD:
+ return s->manager.state ? 1 : 0;
+ case PL330_REG_FSRC:
+ res = 0;
+ for (i = 0; i < s->num_chnls; i++) {
+ if (s->chan[i].state == pl330_chan_fault ||
+ s->chan[i].state == pl330_chan_fault_completing) {
+ res |= 1 << i;
+ }
+ }
+ return res;
+ case PL330_REG_FTRD:
+ return s->manager.fault_type;
+ case PL330_REG_DBGSTATUS:
+ return s->debug_status;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset "
+ TARGET_FMT_plx "\n", offset);
+ }
+ return 0;
+}
+
+static uint64_t pl330_iomem_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ uint32_t ret = pl330_iomem_read_imp(opaque, offset);
+ trace_pl330_iomem_read((uint32_t)offset, ret);
+ return ret;
+}
+
+static const MemoryRegionOps pl330_ops = {
+ .read = pl330_iomem_read,
+ .write = pl330_iomem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+/* Controller logic and initialization */
+
+static void pl330_chan_reset(PL330Chan *ch)
+{
+ ch->src = 0;
+ ch->dst = 0;
+ ch->pc = 0;
+ ch->state = pl330_chan_stopped;
+ ch->watchdog_timer = 0;
+ ch->stall = 0;
+ ch->control = 0;
+ ch->status = 0;
+ ch->fault_type = 0;
+}
+
+static void pl330_reset(DeviceState *d)
+{
+ int i;
+ PL330State *s = PL330(d);
+
+ s->inten = 0;
+ s->int_status = 0;
+ s->ev_status = 0;
+ s->debug_status = 0;
+ s->num_faulting = 0;
+ s->manager.ns = s->mgr_ns_at_rst;
+ pl330_fifo_reset(&s->fifo);
+ pl330_queue_reset(&s->read_queue);
+ pl330_queue_reset(&s->write_queue);
+
+ for (i = 0; i < s->num_chnls; i++) {
+ pl330_chan_reset(&s->chan[i]);
+ }
+ for (i = 0; i < s->num_periph_req; i++) {
+ s->periph_busy[i] = 0;
+ }
+
+ timer_del(s->timer);
+}
+
+static void pl330_realize(DeviceState *dev, Error **errp)
+{
+ int i;
+ PL330State *s = PL330(dev);
+
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq_abort);
+ memory_region_init_io(&s->iomem, OBJECT(s), &pl330_ops, s,
+ "dma", PL330_IOMEM_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+
+ if (!s->mem_mr) {
+ error_setg(errp, "'memory' link is not set");
+ return;
+ } else if (s->mem_mr == get_system_memory()) {
+ /* Avoid creating new AS for system memory. */
+ s->mem_as = &address_space_memory;
+ } else {
+ s->mem_as = g_new0(AddressSpace, 1);
+ address_space_init(s->mem_as, s->mem_mr,
+ memory_region_name(s->mem_mr));
+ }
+
+ s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pl330_exec_cycle_timer, s);
+
+ s->cfg[0] = (s->mgr_ns_at_rst ? 0x4 : 0) |
+ (s->num_periph_req > 0 ? 1 : 0) |
+ ((s->num_chnls - 1) & 0x7) << 4 |
+ ((s->num_periph_req - 1) & 0x1f) << 12 |
+ ((s->num_events - 1) & 0x1f) << 17;
+
+ switch (s->i_cache_len) {
+ case (4):
+ s->cfg[1] |= 2;
+ break;
+ case (8):
+ s->cfg[1] |= 3;
+ break;
+ case (16):
+ s->cfg[1] |= 4;
+ break;
+ case (32):
+ s->cfg[1] |= 5;
+ break;
+ default:
+ error_setg(errp, "Bad value for i-cache_len property: %" PRIx8,
+ s->i_cache_len);
+ return;
+ }
+ s->cfg[1] |= ((s->num_i_cache_lines - 1) & 0xf) << 4;
+
+ s->chan = g_new0(PL330Chan, s->num_chnls);
+ s->hi_seqn = g_new0(uint8_t, s->num_chnls);
+ s->lo_seqn = g_new0(uint8_t, s->num_chnls);
+ for (i = 0; i < s->num_chnls; i++) {
+ s->chan[i].parent = s;
+ s->chan[i].tag = (uint8_t)i;
+ }
+ s->manager.parent = s;
+ s->manager.tag = s->num_chnls;
+ s->manager.is_manager = true;
+
+ s->irq = g_new0(qemu_irq, s->num_events);
+ for (i = 0; i < s->num_events; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
+ }
+
+ qdev_init_gpio_in(dev, pl330_dma_stop_irq, PL330_PERIPH_NUM);
+
+ switch (s->data_width) {
+ case (32):
+ s->cfg[CFG_CRD] |= 0x2;
+ break;
+ case (64):
+ s->cfg[CFG_CRD] |= 0x3;
+ break;
+ case (128):
+ s->cfg[CFG_CRD] |= 0x4;
+ break;
+ default:
+ error_setg(errp, "Bad value for data_width property: %" PRIx8,
+ s->data_width);
+ return;
+ }
+
+ s->cfg[CFG_CRD] |= ((s->wr_cap - 1) & 0x7) << 4 |
+ ((s->wr_q_dep - 1) & 0xf) << 8 |
+ ((s->rd_cap - 1) & 0x7) << 12 |
+ ((s->rd_q_dep - 1) & 0xf) << 16 |
+ ((s->data_buffer_dep - 1) & 0x1ff) << 20;
+
+ pl330_queue_init(&s->read_queue, s->rd_q_dep, s);
+ pl330_queue_init(&s->write_queue, s->wr_q_dep, s);
+ pl330_fifo_init(&s->fifo, s->data_width / 4 * s->data_buffer_dep);
+}
+
+static Property pl330_properties[] = {
+ /* CR0 */
+ DEFINE_PROP_UINT32("num_chnls", PL330State, num_chnls, 8),
+ DEFINE_PROP_UINT8("num_periph_req", PL330State, num_periph_req, 4),
+ DEFINE_PROP_UINT8("num_events", PL330State, num_events, 16),
+ DEFINE_PROP_UINT8("mgr_ns_at_rst", PL330State, mgr_ns_at_rst, 0),
+ /* CR1 */
+ DEFINE_PROP_UINT8("i-cache_len", PL330State, i_cache_len, 4),
+ DEFINE_PROP_UINT8("num_i-cache_lines", PL330State, num_i_cache_lines, 8),
+ /* CR2-4 */
+ DEFINE_PROP_UINT32("boot_addr", PL330State, cfg[CFG_BOOT_ADDR], 0),
+ DEFINE_PROP_UINT32("INS", PL330State, cfg[CFG_INS], 0),
+ DEFINE_PROP_UINT32("PNS", PL330State, cfg[CFG_PNS], 0),
+ /* CRD */
+ DEFINE_PROP_UINT8("data_width", PL330State, data_width, 64),
+ DEFINE_PROP_UINT8("wr_cap", PL330State, wr_cap, 8),
+ DEFINE_PROP_UINT8("wr_q_dep", PL330State, wr_q_dep, 16),
+ DEFINE_PROP_UINT8("rd_cap", PL330State, rd_cap, 8),
+ DEFINE_PROP_UINT8("rd_q_dep", PL330State, rd_q_dep, 16),
+ DEFINE_PROP_UINT16("data_buffer_dep", PL330State, data_buffer_dep, 256),
+
+ DEFINE_PROP_LINK("memory", PL330State, mem_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
+
+ DEFINE_PROP_END_OF_LIST(),
+};
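+
+/*
+ * Board wiring sketch (illustrative only; the base address and IRQ are
+ * placeholders, not values defined in this file):
+ *
+ *   DeviceState *dev = qdev_new(TYPE_PL330);
+ *   object_property_set_link(OBJECT(dev), "memory",
+ *                            OBJECT(get_system_memory()), &error_fatal);
+ *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x10000000);
+ *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, abort_irq);
+ *
+ * Output 0 is the abort interrupt; the per-event interrupts follow in
+ * order, as set up in pl330_realize() above.
+ */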
+
+static void pl330_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = pl330_realize;
+ dc->reset = pl330_reset;
+ device_class_set_props(dc, pl330_properties);
+ dc->vmsd = &vmstate_pl330;
+}
+
+static const TypeInfo pl330_type_info = {
+ .name = TYPE_PL330,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PL330State),
+ .class_init = pl330_class_init,
+};
+
+static void pl330_register_types(void)
+{
+ type_register_static(&pl330_type_info);
+}
+
+type_init(pl330_register_types)
diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c
new file mode 100644
index 000000000..fa896f7ed
--- /dev/null
+++ b/hw/dma/pxa2xx_dma.c
@@ -0,0 +1,591 @@
+/*
+ * Intel XScale PXA255/270 DMA controller.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Copyright (c) 2006 Thorsten Zitterell
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/arm/pxa.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+#define PXA255_DMA_NUM_CHANNELS 16
+#define PXA27X_DMA_NUM_CHANNELS 32
+
+#define PXA2XX_DMA_NUM_REQUESTS 75
+
+typedef struct {
+ uint32_t descr;
+ uint32_t src;
+ uint32_t dest;
+ uint32_t cmd;
+ uint32_t state;
+ int request;
+} PXA2xxDMAChannel;
+
+#define TYPE_PXA2XX_DMA "pxa2xx-dma"
+OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxDMAState, PXA2XX_DMA)
+
+struct PXA2xxDMAState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ uint32_t stopintr;
+ uint32_t eorintr;
+ uint32_t rasintr;
+ uint32_t startintr;
+ uint32_t endintr;
+
+ uint32_t align;
+ uint32_t pio;
+
+ int channels;
+ PXA2xxDMAChannel *chan;
+
+ uint8_t req[PXA2XX_DMA_NUM_REQUESTS];
+
+ /* Flag to avoid recursive DMA invocations. */
+ int running;
+};
+
+#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */
+#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */
+#define DALGN 0x00a0 /* DMA Alignment register */
+#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */
+#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */
+#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */
+#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */
+#define DINT 0x00f0 /* DMA Interrupt register */
+#define DRCMR0 0x0100 /* Request to Channel Map register 0 */
+#define DRCMR63 0x01fc /* Request to Channel Map register 63 */
+#define D_CH0 0x0200 /* Channel 0 Descriptor start */
+#define DRCMR64 0x1100 /* Request to Channel Map register 64 */
+#define DRCMR74 0x1128 /* Request to Channel Map register 74 */
+
+/* Per-channel register */
+#define DDADR 0x00
+#define DSADR 0x01
+#define DTADR 0x02
+#define DCMD 0x03
+
+/* Bit-field masks */
+#define DRCMR_CHLNUM 0x1f
+#define DRCMR_MAPVLD (1 << 7)
+#define DDADR_STOP (1 << 0)
+#define DDADR_BREN (1 << 1)
+#define DCMD_LEN 0x1fff
+#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1))
+#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3))
+#define DCMD_FLYBYT (1 << 19)
+#define DCMD_FLYBYS (1 << 20)
+#define DCMD_ENDIRQEN (1 << 21)
+#define DCMD_STARTIRQEN (1 << 22)
+#define DCMD_CMPEN (1 << 25)
+#define DCMD_FLOWTRG (1 << 28)
+#define DCMD_FLOWSRC (1 << 29)
+#define DCMD_INCTRGADDR (1 << 30)
+#define DCMD_INCSRCADDR (1 << 31)
+#define DCSR_BUSERRINTR (1 << 0)
+#define DCSR_STARTINTR (1 << 1)
+#define DCSR_ENDINTR (1 << 2)
+#define DCSR_STOPINTR (1 << 3)
+#define DCSR_RASINTR (1 << 4)
+#define DCSR_REQPEND (1 << 8)
+#define DCSR_EORINT (1 << 9)
+#define DCSR_CMPST (1 << 10)
+#define DCSR_MASKRUN (1 << 22)
+#define DCSR_RASIRQEN (1 << 23)
+#define DCSR_CLRCMPST (1 << 24)
+#define DCSR_SETCMPST (1 << 25)
+#define DCSR_EORSTOPEN (1 << 26)
+#define DCSR_EORJMPEN (1 << 27)
+#define DCSR_EORIRQEN (1 << 28)
+#define DCSR_STOPIRQEN (1 << 29)
+#define DCSR_NODESCFETCH (1 << 30)
+#define DCSR_RUN (1 << 31)
+
+static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
+{
+ if (ch >= 0) {
+ if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
+ (s->chan[ch].state & DCSR_STOPINTR))
+ s->stopintr |= 1 << ch;
+ else
+ s->stopintr &= ~(1 << ch);
+
+ if ((s->chan[ch].state & DCSR_EORIRQEN) &&
+ (s->chan[ch].state & DCSR_EORINT))
+ s->eorintr |= 1 << ch;
+ else
+ s->eorintr &= ~(1 << ch);
+
+ if ((s->chan[ch].state & DCSR_RASIRQEN) &&
+ (s->chan[ch].state & DCSR_RASINTR))
+ s->rasintr |= 1 << ch;
+ else
+ s->rasintr &= ~(1 << ch);
+
+ if (s->chan[ch].state & DCSR_STARTINTR)
+ s->startintr |= 1 << ch;
+ else
+ s->startintr &= ~(1 << ch);
+
+ if (s->chan[ch].state & DCSR_ENDINTR)
+ s->endintr |= 1 << ch;
+ else
+ s->endintr &= ~(1 << ch);
+ }
+
+ if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
+ qemu_irq_raise(s->irq);
+ else
+ qemu_irq_lower(s->irq);
+}
+
+static inline void pxa2xx_dma_descriptor_fetch(
+ PXA2xxDMAState *s, int ch)
+{
+ uint32_t desc[4];
+ hwaddr daddr = s->chan[ch].descr & ~0xf;
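+    /* A descriptor is four 32-bit words (DDADR, DSADR, DTADR, DCMD) at
+       a 16-byte aligned address; with branch-enable set and the compare
+       status latched, the alternate descriptor 32 bytes further on is
+       fetched instead. */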
+ if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
+ daddr += 32;
+
+ cpu_physical_memory_read(daddr, desc, 16);
+ s->chan[ch].descr = desc[DDADR];
+ s->chan[ch].src = desc[DSADR];
+ s->chan[ch].dest = desc[DTADR];
+ s->chan[ch].cmd = desc[DCMD];
+
+ if (s->chan[ch].cmd & DCMD_FLOWSRC)
+ s->chan[ch].src &= ~3;
+ if (s->chan[ch].cmd & DCMD_FLOWTRG)
+ s->chan[ch].dest &= ~3;
+
+    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
+        qemu_log_mask(LOG_UNIMP, "%s: unsupported mode in channel %i\n",
+                      __func__, ch);
+
+ if (s->chan[ch].cmd & DCMD_STARTIRQEN)
+ s->chan[ch].state |= DCSR_STARTINTR;
+}
+
+static void pxa2xx_dma_run(PXA2xxDMAState *s)
+{
+ int c, srcinc, destinc;
+ uint32_t n, size;
+ uint32_t width;
+ uint32_t length;
+ uint8_t buffer[32];
+ PXA2xxDMAChannel *ch;
+
+    if (s->running++)
+ return;
+
+ while (s->running) {
+ s->running = 1;
+        for (c = 0; c < s->channels; c++) {
+ ch = &s->chan[c];
+
+ while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
+ /* Test for pending requests */
+ if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
+ break;
+
+ length = ch->cmd & DCMD_LEN;
+ size = DCMD_SIZE(ch->cmd);
+ width = DCMD_WIDTH(ch->cmd);
+
+ srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
+ destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;
+
+ while (length) {
+ size = MIN(length, size);
+
+ for (n = 0; n < size; n += width) {
+ cpu_physical_memory_read(ch->src, buffer + n, width);
+ ch->src += srcinc;
+ }
+
+ for (n = 0; n < size; n += width) {
+ cpu_physical_memory_write(ch->dest, buffer + n, width);
+ ch->dest += destinc;
+ }
+
+ length -= size;
+
+ if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
+ !ch->request) {
+ ch->state |= DCSR_EORINT;
+ if (ch->state & DCSR_EORSTOPEN)
+ ch->state |= DCSR_STOPINTR;
+ if ((ch->state & DCSR_EORJMPEN) &&
+ !(ch->state & DCSR_NODESCFETCH))
+ pxa2xx_dma_descriptor_fetch(s, c);
+ break;
+ }
+ }
+
+ ch->cmd = (ch->cmd & ~DCMD_LEN) | length;
+
+ /* Is the transfer complete now? */
+ if (!length) {
+ if (ch->cmd & DCMD_ENDIRQEN)
+ ch->state |= DCSR_ENDINTR;
+
+ if ((ch->state & DCSR_NODESCFETCH) ||
+ (ch->descr & DDADR_STOP) ||
+ (ch->state & DCSR_EORSTOPEN)) {
+ ch->state |= DCSR_STOPINTR;
+ ch->state &= ~DCSR_RUN;
+
+ break;
+ }
+
+ ch->state |= DCSR_STOPINTR;
+ break;
+ }
+ }
+ }
+
+        s->running--;
+ }
+}
+
+static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
+ unsigned int channel;
+
+ if (size != 4) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
+ __func__, size);
+ return 5;
+ }
+
+ switch (offset) {
+ case DRCMR64 ... DRCMR74:
+ offset -= DRCMR64 - DRCMR0 - (64 << 2);
+ /* Fall through */
+ case DRCMR0 ... DRCMR63:
+ channel = (offset - DRCMR0) >> 2;
+ return s->req[channel];
+
+ case DRQSR0:
+ case DRQSR1:
+ case DRQSR2:
+ return 0;
+
+ case DCSR0 ... DCSR31:
+ channel = offset >> 2;
+ if (s->chan[channel].request)
+ return s->chan[channel].state | DCSR_REQPEND;
+ return s->chan[channel].state;
+
+ case DINT:
+ return s->stopintr | s->eorintr | s->rasintr |
+ s->startintr | s->endintr;
+
+ case DALGN:
+ return s->align;
+
+ case DPCSR:
+ return s->pio;
+ }
+
+ if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
+ channel = (offset - D_CH0) >> 4;
+ switch ((offset & 0x0f) >> 2) {
+ case DDADR:
+ return s->chan[channel].descr;
+ case DSADR:
+ return s->chan[channel].src;
+ case DTADR:
+ return s->chan[channel].dest;
+ case DCMD:
+ return s->chan[channel].cmd;
+ }
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ return 7;
+}
+
+static void pxa2xx_dma_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
+ unsigned int channel;
+
+ if (size != 4) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
+ __func__, size);
+ return;
+ }
+
+ switch (offset) {
+ case DRCMR64 ... DRCMR74:
+ offset -= DRCMR64 - DRCMR0 - (64 << 2);
+ /* Fall through */
+ case DRCMR0 ... DRCMR63:
+ channel = (offset - DRCMR0) >> 2;
+
+ if (value & DRCMR_MAPVLD)
+ if ((value & DRCMR_CHLNUM) > s->channels)
+ hw_error("%s: Bad DMA channel %i\n",
+ __func__, (unsigned)value & DRCMR_CHLNUM);
+
+ s->req[channel] = value;
+ break;
+
+ case DRQSR0:
+ case DRQSR1:
+ case DRQSR2:
+ /* Nothing to do */
+ break;
+
+ case DCSR0 ... DCSR31:
+ channel = offset >> 2;
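+ /* The interrupt status bits are write-one-to-clear; the control bits
+ (mask 0xfc800000) are copied straight from the written value. */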
+ s->chan[channel].state &= 0x0000071f & ~(value &
+ (DCSR_EORINT | DCSR_ENDINTR |
+ DCSR_STARTINTR | DCSR_BUSERRINTR));
+ s->chan[channel].state |= value & 0xfc800000;
+
+ if (s->chan[channel].state & DCSR_STOPIRQEN)
+ s->chan[channel].state &= ~DCSR_STOPINTR;
+
+ if (value & DCSR_NODESCFETCH) {
+ /* No-descriptor-fetch mode */
+ if (value & DCSR_RUN) {
+ s->chan[channel].state &= ~DCSR_STOPINTR;
+ pxa2xx_dma_run(s);
+ }
+ } else {
+ /* Descriptor-fetch mode */
+ if (value & DCSR_RUN) {
+ s->chan[channel].state &= ~DCSR_STOPINTR;
+ pxa2xx_dma_descriptor_fetch(s, channel);
+ pxa2xx_dma_run(s);
+ }
+ }
+
+ /* Shouldn't matter as our DMA is synchronous. */
+ if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
+ s->chan[channel].state |= DCSR_STOPINTR;
+
+ if (value & DCSR_CLRCMPST)
+ s->chan[channel].state &= ~DCSR_CMPST;
+ if (value & DCSR_SETCMPST)
+ s->chan[channel].state |= DCSR_CMPST;
+
+ pxa2xx_dma_update(s, channel);
+ break;
+
+ case DALGN:
+ s->align = value;
+ break;
+
+ case DPCSR:
+ s->pio = value & 0x80000001;
+ break;
+
+ default:
+ if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
+ channel = (offset - D_CH0) >> 4;
+ switch ((offset & 0x0f) >> 2) {
+ case DDADR:
+ s->chan[channel].descr = value;
+ break;
+ case DSADR:
+ s->chan[channel].src = value;
+ break;
+ case DTADR:
+ s->chan[channel].dest = value;
+ break;
+ case DCMD:
+ s->chan[channel].cmd = value;
+ break;
+ default:
+ goto fail;
+ }
+
+ break;
+ }
+ fail:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ }
+}
+
+static const MemoryRegionOps pxa2xx_dma_ops = {
+ .read = pxa2xx_dma_read,
+ .write = pxa2xx_dma_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void pxa2xx_dma_request(void *opaque, int req_num, int on)
+{
+ PXA2xxDMAState *s = opaque;
+ int ch;
+ if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
+ hw_error("%s: Bad DMA request %i\n", __func__, req_num);
+
+ if (!(s->req[req_num] & DRCMR_MAPVLD))
+ return;
+ ch = s->req[req_num] & DRCMR_CHLNUM;
+
+ if (!s->chan[ch].request && on)
+ s->chan[ch].state |= DCSR_RASINTR;
+ else
+ s->chan[ch].state &= ~DCSR_RASINTR;
+ if (s->chan[ch].request && !on)
+ s->chan[ch].state |= DCSR_EORINT;
+
+ s->chan[ch].request = on;
+ if (on) {
+ pxa2xx_dma_run(s);
+ pxa2xx_dma_update(s, ch);
+ }
+}
+
+static void pxa2xx_dma_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ PXA2xxDMAState *s = PXA2XX_DMA(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);
+
+ qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);
+
+ memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
+ "pxa2xx.dma", 0x00010000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+}
+
+static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
+{
+ PXA2xxDMAState *s = PXA2XX_DMA(dev);
+ int i;
+
+ if (s->channels <= 0) {
+ error_setg(errp, "channels value invalid");
+ return;
+ }
+
+ s->chan = g_new0(PXA2xxDMAChannel, s->channels);
+
+ for (i = 0; i < s->channels; i ++)
+ s->chan[i].state = DCSR_STOPINTR;
+}
+
+DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
+{
+ DeviceState *dev;
+
+ dev = qdev_new("pxa2xx-dma");
+ qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+
+ return dev;
+}
+
+DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
+{
+ DeviceState *dev;
+
+ dev = qdev_new("pxa2xx-dma");
+ qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+
+ return dev;
+}
+
+static bool is_version_0(void *opaque, int version_id)
+{
+ return version_id == 0;
+}
+
+static const VMStateDescription vmstate_pxa2xx_dma_chan = {
+ .name = "pxa2xx_dma_chan",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(descr, PXA2xxDMAChannel),
+ VMSTATE_UINT32(src, PXA2xxDMAChannel),
+ VMSTATE_UINT32(dest, PXA2xxDMAChannel),
+ VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
+ VMSTATE_UINT32(state, PXA2xxDMAChannel),
+ VMSTATE_INT32(request, PXA2xxDMAChannel),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static const VMStateDescription vmstate_pxa2xx_dma = {
+ .name = "pxa2xx_dma",
+ .version_id = 1,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UNUSED_TEST(is_version_0, 4),
+ VMSTATE_UINT32(stopintr, PXA2xxDMAState),
+ VMSTATE_UINT32(eorintr, PXA2xxDMAState),
+ VMSTATE_UINT32(rasintr, PXA2xxDMAState),
+ VMSTATE_UINT32(startintr, PXA2xxDMAState),
+ VMSTATE_UINT32(endintr, PXA2xxDMAState),
+ VMSTATE_UINT32(align, PXA2xxDMAState),
+ VMSTATE_UINT32(pio, PXA2xxDMAState),
+ VMSTATE_BUFFER(req, PXA2xxDMAState),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
+ vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static Property pxa2xx_dma_properties[] = {
+ DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "PXA2xx DMA controller";
+ dc->vmsd = &vmstate_pxa2xx_dma;
+ device_class_set_props(dc, pxa2xx_dma_properties);
+ dc->realize = pxa2xx_dma_realize;
+}
+
+static const TypeInfo pxa2xx_dma_info = {
+ .name = TYPE_PXA2XX_DMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PXA2xxDMAState),
+ .instance_init = pxa2xx_dma_init,
+ .class_init = pxa2xx_dma_class_init,
+};
+
+static void pxa2xx_dma_register_types(void)
+{
+ type_register_static(&pxa2xx_dma_info);
+}
+
+type_init(pxa2xx_dma_register_types)
diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c
new file mode 100644
index 000000000..e4d2f1725
--- /dev/null
+++ b/hw/dma/rc4030.c
@@ -0,0 +1,754 @@
+/*
+ * QEMU JAZZ RC4030 chipset
+ *
+ * Copyright (c) 2007-2013 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "hw/irq.h"
+#include "hw/mips/mips.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+#include "qom/object.h"
+
+/********************************************************/
+/* rc4030 emulation */
+
+typedef struct dma_pagetable_entry {
+ int32_t frame;
+ int32_t owner;
+} QEMU_PACKED dma_pagetable_entry;
+
+#define DMA_PAGESIZE 4096
+#define DMA_REG_ENABLE 1
+#define DMA_REG_COUNT 2
+#define DMA_REG_ADDRESS 3
+
+#define DMA_FLAG_ENABLE 0x0001
+#define DMA_FLAG_MEM_TO_DEV 0x0002
+#define DMA_FLAG_TC_INTR 0x0100
+#define DMA_FLAG_MEM_INTR 0x0200
+#define DMA_FLAG_ADDR_INTR 0x0400
+
+#define TYPE_RC4030 "rc4030"
+OBJECT_DECLARE_SIMPLE_TYPE(rc4030State, RC4030)
+
+#define TYPE_RC4030_IOMMU_MEMORY_REGION "rc4030-iommu-memory-region"
+
+struct rc4030State {
+
+ SysBusDevice parent;
+
+ uint32_t config; /* 0x0000: RC4030 config register */
+ uint32_t revision; /* 0x0008: RC4030 Revision register */
+ uint32_t invalid_address_register; /* 0x0010: Invalid Address register */
+
+ /* DMA */
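+ /* 0x0100-0x01f8: eight channels x four registers, 8-byte spaced */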
+ uint32_t dma_regs[8][4];
+ uint32_t dma_tl_base; /* 0x0018: DMA transl. table base */
+ uint32_t dma_tl_limit; /* 0x0020: DMA transl. table limit */
+
+ /* cache */
+ uint32_t cache_maint; /* 0x0030: Cache Maintenance */
+ uint32_t remote_failed_address; /* 0x0038: Remote Failed Address */
+ uint32_t memory_failed_address; /* 0x0040: Memory Failed Address */
+ uint32_t cache_ptag; /* 0x0048: I/O Cache Physical Tag */
+ uint32_t cache_ltag; /* 0x0050: I/O Cache Logical Tag */
+ uint32_t cache_bmask; /* 0x0058: I/O Cache Byte Mask */
+
+ uint32_t nmi_interrupt; /* 0x0200: interrupt source */
+ uint32_t memory_refresh_rate; /* 0x0210: memory refresh rate */
+ uint32_t nvram_protect; /* 0x0220: NV ram protect register */
+ uint32_t rem_speed[16];
+ uint32_t imr_jazz; /* Local bus int enable mask */
+ uint32_t isr_jazz; /* Local bus int source */
+
+ /* timer */
+ QEMUTimer *periodic_timer;
+ uint32_t itr; /* Interval timer reload */
+
+ qemu_irq timer_irq;
+ qemu_irq jazz_bus_irq;
+
+ /* whole DMA memory region, root of DMA address space */
+ IOMMUMemoryRegion dma_mr;
+ AddressSpace dma_as;
+
+ MemoryRegion iomem_chipset;
+ MemoryRegion iomem_jazzio;
+};
+
+static void set_next_tick(rc4030State *s)
+{
+ uint32_t tm_hz;
+ qemu_irq_lower(s->timer_irq);
+
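+ /* The interval timer ticks at 1000 / (ITR + 1) Hz: ITR = 0 gives a
+ 1 kHz tick, ITR = 9 gives 100 Hz. */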
+ tm_hz = 1000 / (s->itr + 1);
+
+ timer_mod(s->periodic_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ NANOSECONDS_PER_SECOND / tm_hz);
+}
+
+/* called for accesses to rc4030 */
+static uint64_t rc4030_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ rc4030State *s = opaque;
+ uint32_t val;
+
+ addr &= 0x3fff;
+ switch (addr & ~0x3) {
+ /* Global config register */
+ case 0x0000:
+ val = s->config;
+ break;
+ /* Revision register */
+ case 0x0008:
+ val = s->revision;
+ break;
+ /* Invalid Address register */
+ case 0x0010:
+ val = s->invalid_address_register;
+ break;
+ /* DMA transl. table base */
+ case 0x0018:
+ val = s->dma_tl_base;
+ break;
+ /* DMA transl. table limit */
+ case 0x0020:
+ val = s->dma_tl_limit;
+ break;
+ /* Remote Failed Address */
+ case 0x0038:
+ val = s->remote_failed_address;
+ break;
+ /* Memory Failed Address */
+ case 0x0040:
+ val = s->memory_failed_address;
+ break;
+ /* I/O Cache Byte Mask */
+ case 0x0058:
+ val = s->cache_bmask;
+ /* HACK */
+ if (s->cache_bmask == (uint32_t)-1) {
+ s->cache_bmask = 0;
+ }
+ break;
+ /* Remote Speed Registers */
+ case 0x0070:
+ case 0x0078:
+ case 0x0080:
+ case 0x0088:
+ case 0x0090:
+ case 0x0098:
+ case 0x00a0:
+ case 0x00a8:
+ case 0x00b0:
+ case 0x00b8:
+ case 0x00c0:
+ case 0x00c8:
+ case 0x00d0:
+ case 0x00d8:
+ case 0x00e0:
+ case 0x00e8:
+ val = s->rem_speed[(addr - 0x0070) >> 3];
+ break;
+ /* DMA channel base address */
+ case 0x0100:
+ case 0x0108:
+ case 0x0110:
+ case 0x0118:
+ case 0x0120:
+ case 0x0128:
+ case 0x0130:
+ case 0x0138:
+ case 0x0140:
+ case 0x0148:
+ case 0x0150:
+ case 0x0158:
+ case 0x0160:
+ case 0x0168:
+ case 0x0170:
+ case 0x0178:
+ case 0x0180:
+ case 0x0188:
+ case 0x0190:
+ case 0x0198:
+ case 0x01a0:
+ case 0x01a8:
+ case 0x01b0:
+ case 0x01b8:
+ case 0x01c0:
+ case 0x01c8:
+ case 0x01d0:
+ case 0x01d8:
+ case 0x01e0:
+ case 0x01e8:
+ case 0x01f0:
+ case 0x01f8:
+ {
+ int entry = (addr - 0x0100) >> 5;
+ int idx = (addr & 0x1f) >> 3;
+ val = s->dma_regs[entry][idx];
+ }
+ break;
+ /* Interrupt source */
+ case 0x0200:
+ val = s->nmi_interrupt;
+ break;
+ /* Error type */
+ case 0x0208:
+ val = 0;
+ break;
+ /* Memory refresh rate */
+ case 0x0210:
+ val = s->memory_refresh_rate;
+ break;
+ /* NV ram protect register */
+ case 0x0220:
+ val = s->nvram_protect;
+ break;
+ /* Interval timer count */
+ case 0x0230:
+ val = 0;
+ qemu_irq_lower(s->timer_irq);
+ break;
+ /* EISA interrupt */
+ case 0x0238:
+ val = 7; /* FIXME: should be read from EISA controller */
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030: invalid read at 0x%x", (int)addr);
+ val = 0;
+ break;
+ }
+
+ if ((addr & ~3) != 0x230) {
+ trace_rc4030_read(addr, val);
+ }
+
+ return val;
+}
+
+static void rc4030_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
+{
+ rc4030State *s = opaque;
+ uint32_t val = data;
+ addr &= 0x3fff;
+
+ trace_rc4030_write(addr, val);
+
+ switch (addr & ~0x3) {
+ /* Global config register */
+ case 0x0000:
+ s->config = val;
+ break;
+ /* DMA transl. table base */
+ case 0x0018:
+ s->dma_tl_base = val;
+ break;
+ /* DMA transl. table limit */
+ case 0x0020:
+ s->dma_tl_limit = val;
+ break;
+ /* DMA transl. table invalidated */
+ case 0x0028:
+ break;
+ /* Cache Maintenance */
+ case 0x0030:
+ s->cache_maint = val;
+ break;
+ /* I/O Cache Physical Tag */
+ case 0x0048:
+ s->cache_ptag = val;
+ break;
+ /* I/O Cache Logical Tag */
+ case 0x0050:
+ s->cache_ltag = val;
+ break;
+ /* I/O Cache Byte Mask */
+ case 0x0058:
+ s->cache_bmask |= val; /* HACK */
+ break;
+ /* I/O Cache Buffer Window */
+ case 0x0060:
+ /* HACK */
+ if (s->cache_ltag == 0x80000001 && s->cache_bmask == 0xf0f0f0f) {
+ hwaddr dest = s->cache_ptag & ~0x1;
+ dest += (s->cache_maint & 0x3) << 3;
+ cpu_physical_memory_write(dest, &val, 4);
+ }
+ break;
+ /* Remote Speed Registers */
+ case 0x0070:
+ case 0x0078:
+ case 0x0080:
+ case 0x0088:
+ case 0x0090:
+ case 0x0098:
+ case 0x00a0:
+ case 0x00a8:
+ case 0x00b0:
+ case 0x00b8:
+ case 0x00c0:
+ case 0x00c8:
+ case 0x00d0:
+ case 0x00d8:
+ case 0x00e0:
+ case 0x00e8:
+ s->rem_speed[(addr - 0x0070) >> 3] = val;
+ break;
+ /* DMA channel base address */
+ case 0x0100:
+ case 0x0108:
+ case 0x0110:
+ case 0x0118:
+ case 0x0120:
+ case 0x0128:
+ case 0x0130:
+ case 0x0138:
+ case 0x0140:
+ case 0x0148:
+ case 0x0150:
+ case 0x0158:
+ case 0x0160:
+ case 0x0168:
+ case 0x0170:
+ case 0x0178:
+ case 0x0180:
+ case 0x0188:
+ case 0x0190:
+ case 0x0198:
+ case 0x01a0:
+ case 0x01a8:
+ case 0x01b0:
+ case 0x01b8:
+ case 0x01c0:
+ case 0x01c8:
+ case 0x01d0:
+ case 0x01d8:
+ case 0x01e0:
+ case 0x01e8:
+ case 0x01f0:
+ case 0x01f8:
+ {
+ int entry = (addr - 0x0100) >> 5;
+ int idx = (addr & 0x1f) >> 3;
+ s->dma_regs[entry][idx] = val;
+ }
+ break;
+ /* Memory refresh rate */
+ case 0x0210:
+ s->memory_refresh_rate = val;
+ break;
+ /* Interval timer reload */
+ case 0x0228:
+ s->itr = val & 0x01FF;
+ qemu_irq_lower(s->timer_irq);
+ set_next_tick(s);
+ break;
+ /* EISA interrupt */
+ case 0x0238:
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030: invalid write of 0x%02x at 0x%x",
+ val, (int)addr);
+ break;
+ }
+}
+
+static const MemoryRegionOps rc4030_ops = {
+ .read = rc4030_read,
+ .write = rc4030_write,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void update_jazz_irq(rc4030State *s)
+{
+ uint16_t pending;
+
+ pending = s->isr_jazz & s->imr_jazz;
+
+ if (pending != 0) {
+ qemu_irq_raise(s->jazz_bus_irq);
+ } else {
+ qemu_irq_lower(s->jazz_bus_irq);
+ }
+}
+
+static void rc4030_irq_jazz_request(void *opaque, int irq, int level)
+{
+ rc4030State *s = opaque;
+
+ if (level) {
+ s->isr_jazz |= 1 << irq;
+ } else {
+ s->isr_jazz &= ~(1 << irq);
+ }
+
+ update_jazz_irq(s);
+}
+
+static void rc4030_periodic_timer(void *opaque)
+{
+ rc4030State *s = opaque;
+
+ set_next_tick(s);
+ qemu_irq_raise(s->timer_irq);
+}
+
+static uint64_t jazzio_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ rc4030State *s = opaque;
+ uint32_t val;
+ uint32_t irq;
+ addr &= 0xfff;
+
+ switch (addr) {
+ /* Local bus int source */
+ case 0x00: {
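+ /* Return the vector of the lowest pending enabled interrupt,
+ encoded as (irq + 1) * 4; 0 means nothing is pending. */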
+ uint32_t pending = s->isr_jazz & s->imr_jazz;
+ val = 0;
+ irq = 0;
+ while (pending) {
+ if (pending & 1) {
+ val = (irq + 1) << 2;
+ break;
+ }
+ irq++;
+ pending >>= 1;
+ }
+ break;
+ }
+ /* Local bus int enable mask */
+ case 0x02:
+ val = s->imr_jazz;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030/jazzio: invalid read at 0x%x", (int)addr);
+ val = 0;
+ break;
+ }
+
+ trace_jazzio_read(addr, val);
+
+ return val;
+}
+
+static void jazzio_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
+{
+ rc4030State *s = opaque;
+ uint32_t val = data;
+ addr &= 0xfff;
+
+ trace_jazzio_write(addr, val);
+
+ switch (addr) {
+ /* Local bus int enable mask */
+ case 0x02:
+ s->imr_jazz = val;
+ update_jazz_irq(s);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "rc4030/jazzio: invalid write of 0x%02x at 0x%x",
+ val, (int)addr);
+ break;
+ }
+}
+
+static const MemoryRegionOps jazzio_ops = {
+ .read = jazzio_read,
+ .write = jazzio_write,
+ .impl.min_access_size = 2,
+ .impl.max_access_size = 2,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static IOMMUTLBEntry rc4030_dma_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
+ IOMMUAccessFlags flag, int iommu_idx)
+{
+ rc4030State *s = container_of(iommu, rc4030State, dma_mr);
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = addr & ~(DMA_PAGESIZE - 1),
+ .translated_addr = 0,
+ .addr_mask = DMA_PAGESIZE - 1,
+ .perm = IOMMU_NONE,
+ };
+ uint64_t i, entry_address;
+ dma_pagetable_entry entry;
+
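+ /* The translation table holds one 8-byte entry per 4 KiB DMA page,
+ indexed by the page number of the DMA address. */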
+ i = addr / DMA_PAGESIZE;
+ if (i < s->dma_tl_limit / sizeof(entry)) {
+ entry_address = (s->dma_tl_base & 0x7fffffff) + i * sizeof(entry);
+ if (address_space_read(ret.target_as, entry_address,
+ MEMTXATTRS_UNSPECIFIED, &entry, sizeof(entry))
+ == MEMTX_OK) {
+ ret.translated_addr = entry.frame & ~(DMA_PAGESIZE - 1);
+ ret.perm = IOMMU_RW;
+ }
+ }
+
+ return ret;
+}
+
+static void rc4030_reset(DeviceState *dev)
+{
+ rc4030State *s = RC4030(dev);
+ int i;
+
+ s->config = 0x410; /* some boards seem to accept 0x104 too */
+ s->revision = 1;
+ s->invalid_address_register = 0;
+
+ memset(s->dma_regs, 0, sizeof(s->dma_regs));
+
+ s->remote_failed_address = s->memory_failed_address = 0;
+ s->cache_maint = 0;
+ s->cache_ptag = s->cache_ltag = 0;
+ s->cache_bmask = 0;
+
+ s->memory_refresh_rate = 0x18186;
+ s->nvram_protect = 7;
+ for (i = 0; i < 15; i++) {
+ s->rem_speed[i] = 7;
+ }
+ s->imr_jazz = 0x10; /* XXX: required by firmware, but why? */
+ s->isr_jazz = 0;
+
+ s->itr = 0;
+
+ qemu_irq_lower(s->timer_irq);
+ qemu_irq_lower(s->jazz_bus_irq);
+}
+
+static int rc4030_post_load(void *opaque, int version_id)
+{
+ rc4030State *s = opaque;
+
+ set_next_tick(s);
+ update_jazz_irq(s);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_rc4030 = {
+ .name = "rc4030",
+ .version_id = 3,
+ .post_load = rc4030_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(config, rc4030State),
+ VMSTATE_UINT32(invalid_address_register, rc4030State),
+ VMSTATE_UINT32_2DARRAY(dma_regs, rc4030State, 8, 4),
+ VMSTATE_UINT32(dma_tl_base, rc4030State),
+ VMSTATE_UINT32(dma_tl_limit, rc4030State),
+ VMSTATE_UINT32(cache_maint, rc4030State),
+ VMSTATE_UINT32(remote_failed_address, rc4030State),
+ VMSTATE_UINT32(memory_failed_address, rc4030State),
+ VMSTATE_UINT32(cache_ptag, rc4030State),
+ VMSTATE_UINT32(cache_ltag, rc4030State),
+ VMSTATE_UINT32(cache_bmask, rc4030State),
+ VMSTATE_UINT32(memory_refresh_rate, rc4030State),
+ VMSTATE_UINT32(nvram_protect, rc4030State),
+ VMSTATE_UINT32_ARRAY(rem_speed, rc4030State, 16),
+ VMSTATE_UINT32(imr_jazz, rc4030State),
+ VMSTATE_UINT32(isr_jazz, rc4030State),
+ VMSTATE_UINT32(itr, rc4030State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void rc4030_do_dma(void *opaque, int n, uint8_t *buf,
+ int len, bool is_write)
+{
+ rc4030State *s = opaque;
+ hwaddr dma_addr;
+ int dev_to_mem;
+
+ s->dma_regs[n][DMA_REG_ENABLE] &=
+ ~(DMA_FLAG_TC_INTR | DMA_FLAG_MEM_INTR | DMA_FLAG_ADDR_INTR);
+
+ /* Check DMA channel consistency */
+ dev_to_mem = (s->dma_regs[n][DMA_REG_ENABLE] & DMA_FLAG_MEM_TO_DEV) ? 0 : 1;
+ if (!(s->dma_regs[n][DMA_REG_ENABLE] & DMA_FLAG_ENABLE) ||
+ (is_write != dev_to_mem)) {
+ s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_MEM_INTR;
+ s->nmi_interrupt |= 1 << n;
+ return;
+ }
+
+ /* Get start address and len */
+ if (len > s->dma_regs[n][DMA_REG_COUNT]) {
+ len = s->dma_regs[n][DMA_REG_COUNT];
+ }
+ dma_addr = s->dma_regs[n][DMA_REG_ADDRESS];
+
+ /* Read/write data at right place */
+ address_space_rw(&s->dma_as, dma_addr, MEMTXATTRS_UNSPECIFIED,
+ buf, len, is_write);
+
+ s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_TC_INTR;
+ s->dma_regs[n][DMA_REG_COUNT] -= len;
+}
+
+struct rc4030DMAState {
+ void *opaque;
+ int n;
+};
+
+void rc4030_dma_read(void *dma, uint8_t *buf, int len)
+{
+ rc4030_dma s = dma;
+ rc4030_do_dma(s->opaque, s->n, buf, len, false);
+}
+
+void rc4030_dma_write(void *dma, uint8_t *buf, int len)
+{
+ rc4030_dma s = dma;
+ rc4030_do_dma(s->opaque, s->n, buf, len, true);
+}
+
+static rc4030_dma *rc4030_allocate_dmas(void *opaque, int n)
+{
+ rc4030_dma *s;
+ struct rc4030DMAState *p;
+ int i;
+
+ s = g_new0(rc4030_dma, n);
+ p = g_new0(struct rc4030DMAState, n);
+ for (i = 0; i < n; i++) {
+ p->opaque = opaque;
+ p->n = i;
+ s[i] = p;
+ p++;
+ }
+ return s;
+}
+
+static void rc4030_initfn(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ rc4030State *s = RC4030(obj);
+ SysBusDevice *sysbus = SYS_BUS_DEVICE(obj);
+
+ qdev_init_gpio_in(dev, rc4030_irq_jazz_request, 16);
+
+ sysbus_init_irq(sysbus, &s->timer_irq);
+ sysbus_init_irq(sysbus, &s->jazz_bus_irq);
+
+ sysbus_init_mmio(sysbus, &s->iomem_chipset);
+ sysbus_init_mmio(sysbus, &s->iomem_jazzio);
+}
+
+static void rc4030_realize(DeviceState *dev, Error **errp)
+{
+ rc4030State *s = RC4030(dev);
+ Object *o = OBJECT(dev);
+
+ s->periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ rc4030_periodic_timer, s);
+
+ memory_region_init_io(&s->iomem_chipset, o, &rc4030_ops, s,
+ "rc4030.chipset", 0x300);
+ memory_region_init_io(&s->iomem_jazzio, o, &jazzio_ops, s,
+ "rc4030.jazzio", 0x00001000);
+
+ memory_region_init_iommu(&s->dma_mr, sizeof(s->dma_mr),
+ TYPE_RC4030_IOMMU_MEMORY_REGION,
+ o, "rc4030.dma", 4 * GiB);
+ address_space_init(&s->dma_as, MEMORY_REGION(&s->dma_mr), "rc4030-dma");
+}
+
+static void rc4030_unrealize(DeviceState *dev)
+{
+ rc4030State *s = RC4030(dev);
+
+ timer_free(s->periodic_timer);
+
+ address_space_destroy(&s->dma_as);
+ object_unparent(OBJECT(&s->dma_mr));
+}
+
+static void rc4030_class_init(ObjectClass *klass, void *class_data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = rc4030_realize;
+ dc->unrealize = rc4030_unrealize;
+ dc->reset = rc4030_reset;
+ dc->vmsd = &vmstate_rc4030;
+}
+
+static const TypeInfo rc4030_info = {
+ .name = TYPE_RC4030,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(rc4030State),
+ .instance_init = rc4030_initfn,
+ .class_init = rc4030_class_init,
+};
+
+static void rc4030_iommu_memory_region_class_init(ObjectClass *klass,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = rc4030_dma_translate;
+}
+
+static const TypeInfo rc4030_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_RC4030_IOMMU_MEMORY_REGION,
+ .class_init = rc4030_iommu_memory_region_class_init,
+};
+
+static void rc4030_register_types(void)
+{
+ type_register_static(&rc4030_info);
+ type_register_static(&rc4030_iommu_memory_region_info);
+}
+
+type_init(rc4030_register_types)
+
+DeviceState *rc4030_init(rc4030_dma **dmas, IOMMUMemoryRegion **dma_mr)
+{
+ DeviceState *dev;
+
+ dev = qdev_new(TYPE_RC4030);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ *dmas = rc4030_allocate_dmas(dev, 4);
+ *dma_mr = &RC4030(dev)->dma_mr;
+ return dev;
+}
diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c
new file mode 100644
index 000000000..85fe34f5f
--- /dev/null
+++ b/hw/dma/sifive_pdma.c
@@ -0,0 +1,351 @@
+/*
+ * SiFive Platform DMA emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ * Bin Meng <bin.meng@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "hw/dma/sifive_pdma.h"
+
+#define DMA_CONTROL 0x000
+#define CONTROL_CLAIM BIT(0)
+#define CONTROL_RUN BIT(1)
+#define CONTROL_DONE_IE BIT(14)
+#define CONTROL_ERR_IE BIT(15)
+#define CONTROL_DONE BIT(30)
+#define CONTROL_ERR BIT(31)
+
+#define DMA_NEXT_CONFIG 0x004
+#define CONFIG_REPEAT BIT(2)
+#define CONFIG_ORDER BIT(3)
+#define CONFIG_WRSZ_SHIFT 24
+#define CONFIG_RDSZ_SHIFT 28
+#define CONFIG_SZ_MASK 0xf
+
+#define DMA_NEXT_BYTES 0x008
+#define DMA_NEXT_DST 0x010
+#define DMA_NEXT_SRC 0x018
+#define DMA_EXEC_CONFIG 0x104
+#define DMA_EXEC_BYTES 0x108
+#define DMA_EXEC_DST 0x110
+#define DMA_EXEC_SRC 0x118
+
+/*
+ * FU540/FU740 docs are incorrect about NextConfig.wsize/rsize reset values.
+ * The reset values tested on Unleashed/Unmatched boards are 6 instead of 0.
+ */
+#define CONFIG_WRSZ_DEFAULT 6
+#define CONFIG_RDSZ_DEFAULT 6
+
+enum dma_chan_state {
+ DMA_CHAN_STATE_IDLE,
+ DMA_CHAN_STATE_STARTED,
+ DMA_CHAN_STATE_ERROR,
+ DMA_CHAN_STATE_DONE
+};
+
+static void sifive_pdma_run(SiFivePDMAState *s, int ch)
+{
+ uint64_t bytes = s->chan[ch].next_bytes;
+ uint64_t dst = s->chan[ch].next_dst;
+ uint64_t src = s->chan[ch].next_src;
+ uint32_t config = s->chan[ch].next_config;
+ int wsize, rsize, size, remainder;
+ uint8_t buf[64];
+ int n;
+
+ /* do nothing if bytes to transfer is zero */
+ if (!bytes) {
+ goto done;
+ }
+
+ /*
+ * The manual does not describe how the hardware behaves when
+ * config.wsize and config.rsize are given different values.
+ * A common case is memory to memory DMA, and in this case they
+ * are normally the same. Abort if this expectation fails.
+ */
+ wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
+ rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
+ if (wsize != rsize) {
+ goto error;
+ }
+
+ /*
+ * Calculate the transaction size
+ *
+ * size field is base 2 logarithm of DMA transaction size,
+ * but there is an upper limit of 64 bytes per transaction.
+ */
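+ /* e.g. wsize = 3 selects 8-byte transactions; encodings above 6 are
+ clamped to the 64-byte maximum. */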
+ size = wsize;
+ if (size > 6) {
+ size = 6;
+ }
+ size = 1 << size;
+ remainder = bytes % size;
+
+ /* indicate a DMA transfer is started */
+ s->chan[ch].state = DMA_CHAN_STATE_STARTED;
+ s->chan[ch].control &= ~CONTROL_DONE;
+ s->chan[ch].control &= ~CONTROL_ERR;
+
+ /* load the next_ registers into their exec_ counterparts */
+ s->chan[ch].exec_config = config;
+ s->chan[ch].exec_bytes = bytes;
+ s->chan[ch].exec_dst = dst;
+ s->chan[ch].exec_src = src;
+
+ for (n = 0; n < bytes / size; n++) {
+ cpu_physical_memory_read(s->chan[ch].exec_src, buf, size);
+ cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size);
+ s->chan[ch].exec_src += size;
+ s->chan[ch].exec_dst += size;
+ s->chan[ch].exec_bytes -= size;
+ }
+
+ if (remainder) {
+ cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder);
+ cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder);
+ s->chan[ch].exec_src += remainder;
+ s->chan[ch].exec_dst += remainder;
+ s->chan[ch].exec_bytes -= remainder;
+ }
+
+ /* reload exec_ registers if repeat is required */
+ if (s->chan[ch].next_config & CONFIG_REPEAT) {
+ s->chan[ch].exec_bytes = bytes;
+ s->chan[ch].exec_dst = dst;
+ s->chan[ch].exec_src = src;
+ }
+
+done:
+ /* indicate a DMA transfer is done */
+ s->chan[ch].state = DMA_CHAN_STATE_DONE;
+ s->chan[ch].control &= ~CONTROL_RUN;
+ s->chan[ch].control |= CONTROL_DONE;
+ return;
+
+error:
+ s->chan[ch].state = DMA_CHAN_STATE_ERROR;
+ s->chan[ch].control |= CONTROL_ERR;
+ return;
+}
+
+static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
+{
+ bool done_ie, err_ie;
+
+ done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE);
+ err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE);
+
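+ /* Each channel drives a pair of IRQ lines: the even line signals
+ completion, the odd line signals an error. */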
+ if (done_ie && (s->chan[ch].control & CONTROL_DONE)) {
+ qemu_irq_raise(s->irq[ch * 2]);
+ } else {
+ qemu_irq_lower(s->irq[ch * 2]);
+ }
+
+ if (err_ie && (s->chan[ch].control & CONTROL_ERR)) {
+ qemu_irq_raise(s->irq[ch * 2 + 1]);
+ } else {
+ qemu_irq_lower(s->irq[ch * 2 + 1]);
+ }
+
+ s->chan[ch].state = DMA_CHAN_STATE_IDLE;
+}
+
+static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
+{
+ SiFivePDMAState *s = opaque;
+ int ch = SIFIVE_PDMA_CHAN_NO(offset);
+ uint64_t val = 0;
+
+ if (ch >= SIFIVE_PDMA_CHANS) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
+ __func__, ch);
+ return 0;
+ }
+
+ offset &= 0xfff;
+ switch (offset) {
+ case DMA_CONTROL:
+ val = s->chan[ch].control;
+ break;
+ case DMA_NEXT_CONFIG:
+ val = s->chan[ch].next_config;
+ break;
+ case DMA_NEXT_BYTES:
+ val = s->chan[ch].next_bytes;
+ break;
+ case DMA_NEXT_DST:
+ val = s->chan[ch].next_dst;
+ break;
+ case DMA_NEXT_SRC:
+ val = s->chan[ch].next_src;
+ break;
+ case DMA_EXEC_CONFIG:
+ val = s->chan[ch].exec_config;
+ break;
+ case DMA_EXEC_BYTES:
+ val = s->chan[ch].exec_bytes;
+ break;
+ case DMA_EXEC_DST:
+ val = s->chan[ch].exec_dst;
+ break;
+ case DMA_EXEC_SRC:
+ val = s->chan[ch].exec_src;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ break;
+ }
+
+ return val;
+}
+
+static void sifive_pdma_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ SiFivePDMAState *s = opaque;
+ int ch = SIFIVE_PDMA_CHAN_NO(offset);
+ bool claimed, run;
+
+ if (ch >= SIFIVE_PDMA_CHANS) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
+ __func__, ch);
+ return;
+ }
+
+ offset &= 0xfff;
+ switch (offset) {
+ case DMA_CONTROL:
+ claimed = !!(s->chan[ch].control & CONTROL_CLAIM);
+ run = !!(s->chan[ch].control & CONTROL_RUN);
+
+ if (!claimed && (value & CONTROL_CLAIM)) {
+ /* reset Next* registers */
+ s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) |
+ (CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT);
+ s->chan[ch].next_bytes = 0;
+ s->chan[ch].next_dst = 0;
+ s->chan[ch].next_src = 0;
+ }
+
+ /* claim bit can only be cleared when run is low */
+ if (run && !(value & CONTROL_CLAIM)) {
+ value |= CONTROL_CLAIM;
+ }
+
+ s->chan[ch].control = value;
+
+ /*
+ * If channel was not claimed before run bit is set,
+ * or if the channel is disclaimed when run was low,
+ * DMA won't run.
+ */
+ if (!claimed || (!run && !(value & CONTROL_CLAIM))) {
+ s->chan[ch].control &= ~CONTROL_RUN;
+ return;
+ }
+
+ if (value & CONTROL_RUN) {
+ sifive_pdma_run(s, ch);
+ }
+
+ sifive_pdma_update_irq(s, ch);
+ break;
+ case DMA_NEXT_CONFIG:
+ s->chan[ch].next_config = value;
+ break;
+ case DMA_NEXT_BYTES:
+ s->chan[ch].next_bytes = value;
+ break;
+ case DMA_NEXT_DST:
+ s->chan[ch].next_dst = value;
+ break;
+ case DMA_NEXT_SRC:
+ s->chan[ch].next_src = value;
+ break;
+ case DMA_EXEC_CONFIG:
+ case DMA_EXEC_BYTES:
+ case DMA_EXEC_DST:
+ case DMA_EXEC_SRC:
+ /* these are read-only registers */
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps sifive_pdma_ops = {
+ .read = sifive_pdma_read,
+ .write = sifive_pdma_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ /* there are 32-bit and 64-bit wide registers */
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ }
+};
+
+static void sifive_pdma_realize(DeviceState *dev, Error **errp)
+{
+ SiFivePDMAState *s = SIFIVE_PDMA(dev);
+ int i;
+
+ memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s,
+ TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+
+ for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
+ }
+}
+
+static void sifive_pdma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "SiFive Platform DMA controller";
+ dc->realize = sifive_pdma_realize;
+}
+
+static const TypeInfo sifive_pdma_info = {
+ .name = TYPE_SIFIVE_PDMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SiFivePDMAState),
+ .class_init = sifive_pdma_class_init,
+};
+
+static void sifive_pdma_register_types(void)
+{
+ type_register_static(&sifive_pdma_info);
+}
+
+type_init(sifive_pdma_register_types)
diff --git a/hw/dma/soc_dma.c b/hw/dma/soc_dma.c
new file mode 100644
index 000000000..3a430057f
--- /dev/null
+++ b/hw/dma/soc_dma.c
@@ -0,0 +1,361 @@
+/*
+ * On-chip DMA controller framework.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "hw/arm/soc_dma.h"
+
+static void transfer_mem2mem(struct soc_dma_ch_s *ch)
+{
+ memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
+ ch->paddr[0] += ch->bytes;
+ ch->paddr[1] += ch->bytes;
+}
+
+static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
+{
+ ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
+ ch->paddr[0] += ch->bytes;
+}
+
+static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
+{
+ ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
+ ch->paddr[1] += ch->bytes;
+}
+
+/* This is further optimisable but isn't very important because often
+ * DMA peripherals forbid this kind of transfer and even when they don't,
+ * operating systems may not need to use it. */
+static void *fifo_buf;
+static int fifo_size;
+static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
+{
+ if (ch->bytes > fifo_size)
+ fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);
+
+ /* Implement as transfer_fifo2linear + transfer_linear2fifo. */
+ ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
+ ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
+}
+
+struct dma_s {
+ struct soc_dma_s soc;
+ int chnum;
+ uint64_t ch_enable_mask;
+ int64_t channel_freq;
+ int enabled_count;
+
+ struct memmap_entry_s {
+ enum soc_dma_port_type type;
+ hwaddr addr;
+ union {
+ struct {
+ void *opaque;
+ soc_dma_io_t fn;
+ int out;
+ } fifo;
+ struct {
+ void *base;
+ size_t size;
+ } mem;
+ } u;
+ } *memmap;
+ int memmap_size;
+
+ struct soc_dma_ch_s ch[];
+};
+
+static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
+{
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ struct dma_s *dma = (struct dma_s *) ch->dma;
+
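+ /* Model the transfer time: every enabled channel gets an equal share
+ of the controller rate, so the delay grows with the byte count. */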
+ timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
+}
+
+static void soc_dma_ch_run(void *opaque)
+{
+ struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;
+
+ ch->running = 1;
+ ch->dma->setup_fn(ch);
+ ch->transfer_fn(ch);
+ ch->running = 0;
+
+ if (ch->enable)
+ soc_dma_ch_schedule(ch, ch->bytes);
+ ch->bytes = 0;
+}
+
+static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
+ hwaddr addr)
+{
+ struct memmap_entry_s *lo;
+ int hi;
+
+ lo = dma->memmap;
+ hi = dma->memmap_size;
+
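+ /* Binary search for the last map entry starting at or below addr. */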
+ while (hi > 1) {
+ hi /= 2;
+ if (lo[hi].addr <= addr)
+ lo += hi;
+ }
+
+ return lo;
+}
+
+static inline enum soc_dma_port_type soc_dma_ch_update_type(
+ struct soc_dma_ch_s *ch, int port)
+{
+ struct dma_s *dma = (struct dma_s *) ch->dma;
+ struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);
+
+ if (entry->type == soc_dma_port_fifo) {
+ while (entry < dma->memmap + dma->memmap_size &&
+ entry->u.fifo.out != port)
+ entry ++;
+ if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
+ return soc_dma_port_other;
+
+ if (ch->type[port] != soc_dma_access_const)
+ return soc_dma_port_other;
+
+ ch->io_fn[port] = entry->u.fifo.fn;
+ ch->io_opaque[port] = entry->u.fifo.opaque;
+ return soc_dma_port_fifo;
+ } else if (entry->type == soc_dma_port_mem) {
+ if (entry->addr > ch->vaddr[port] ||
+ entry->addr + entry->u.mem.size <= ch->vaddr[port])
+ return soc_dma_port_other;
+
+ /* TODO: support constant memory address for source port as used for
+ * drawing solid rectangles by PalmOS(R). */
+ if (ch->type[port] != soc_dma_access_linear)
+ return soc_dma_port_other;
+
+ ch->paddr[port] = (uint8_t *) entry->u.mem.base +
+ (ch->vaddr[port] - entry->addr);
+ /* TODO: save bytes left to the end of the mapping somewhere so we
+ * can check we're not reading beyond it. */
+ return soc_dma_port_mem;
+ } else
+ return soc_dma_port_other;
+}
+
+void soc_dma_ch_update(struct soc_dma_ch_s *ch)
+{
+ enum soc_dma_port_type src, dst;
+
+ src = soc_dma_ch_update_type(ch, 0);
+ if (src == soc_dma_port_other) {
+ ch->update = 0;
+ ch->transfer_fn = ch->dma->transfer_fn;
+ return;
+ }
+ dst = soc_dma_ch_update_type(ch, 1);
+
+ /* TODO: use src and dst as array indices. */
+ if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
+ ch->transfer_fn = transfer_mem2mem;
+ else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
+ ch->transfer_fn = transfer_mem2fifo;
+ else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
+ ch->transfer_fn = transfer_fifo2mem;
+ else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
+ ch->transfer_fn = transfer_fifo2fifo;
+ else
+ ch->transfer_fn = ch->dma->transfer_fn;
+
+ ch->update = (dst != soc_dma_port_other);
+}
+
+static void soc_dma_ch_freq_update(struct dma_s *s)
+{
+ if (s->enabled_count)
+ /* We completely ignore channel priorities and other arbitration details */
+ s->channel_freq = s->soc.freq / s->enabled_count;
+ else {
+ /* TODO: Signal that we want to disable the functional clock and let
+ * the platform code decide what to do with it, i.e. check that
+ * auto-idle is enabled in the clock controller and if we are stopping
+ * the clock, do the same with any parent clocks that had only one
+ * user keeping them on and auto-idle enabled. */
+ }
+}
+
+void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
+{
+ struct dma_s *dma = (struct dma_s *) ch->dma;
+
+ dma->enabled_count += level - ch->enable;
+
+ if (level)
+ dma->ch_enable_mask |= 1 << ch->num;
+ else
+ dma->ch_enable_mask &= ~(1 << ch->num);
+
+ if (level != ch->enable) {
+ soc_dma_ch_freq_update(dma);
+ ch->enable = level;
+
+ if (!ch->enable)
+ timer_del(ch->timer);
+ else if (!ch->running)
+ soc_dma_ch_run(ch);
+ else
+ soc_dma_ch_schedule(ch, 1);
+ }
+}
+
+void soc_dma_reset(struct soc_dma_s *soc)
+{
+ struct dma_s *s = (struct dma_s *) soc;
+
+ s->soc.drqbmp = 0;
+ s->ch_enable_mask = 0;
+ s->enabled_count = 0;
+ soc_dma_ch_freq_update(s);
+}
+
+/* TODO: take a functional-clock argument */
+struct soc_dma_s *soc_dma_init(int n)
+{
+ int i;
+ struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));
+
+ s->chnum = n;
+ s->soc.ch = s->ch;
+ for (i = 0; i < n; i ++) {
+ s->ch[i].dma = &s->soc;
+ s->ch[i].num = i;
+ s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, soc_dma_ch_run, &s->ch[i]);
+ }
+
+ soc_dma_reset(&s->soc);
+ fifo_size = 0;
+
+ return &s->soc;
+}
+
+void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
+ soc_dma_io_t fn, void *opaque, int out)
+{
+ struct memmap_entry_s *entry;
+ struct dma_s *dma = (struct dma_s *) soc;
+
+ dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
+ (dma->memmap_size + 1));
+ entry = soc_dma_lookup(dma, virt_base);
+
+ if (dma->memmap_size) {
+ if (entry->type == soc_dma_port_mem) {
+ if (entry->addr <= virt_base &&
+ entry->addr + entry->u.mem.size > virt_base) {
+ error_report("%s: FIFO at %"PRIx64
+ " collides with RAM region at %"PRIx64
+ "-%"PRIx64, __func__,
+ virt_base, entry->addr,
+ (entry->addr + entry->u.mem.size));
+ exit(-1);
+ }
+
+ if (entry->addr <= virt_base)
+ entry ++;
+ } else
+ while (entry < dma->memmap + dma->memmap_size &&
+ entry->addr <= virt_base) {
+ if (entry->addr == virt_base && entry->u.fifo.out == out) {
+ error_report("%s: FIFO at %"PRIx64
+ " collides FIFO at %"PRIx64,
+ __func__, virt_base, entry->addr);
+ exit(-1);
+ }
+
+ entry ++;
+ }
+
+ memmove(entry + 1, entry,
+ (uint8_t *) (dma->memmap + dma->memmap_size ++) -
+ (uint8_t *) entry);
+ } else
+ dma->memmap_size ++;
+
+ entry->addr = virt_base;
+ entry->type = soc_dma_port_fifo;
+ entry->u.fifo.fn = fn;
+ entry->u.fifo.opaque = opaque;
+ entry->u.fifo.out = out;
+}
+
+void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
+ hwaddr virt_base, size_t size)
+{
+ struct memmap_entry_s *entry;
+ struct dma_s *dma = (struct dma_s *) soc;
+
+ dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
+ (dma->memmap_size + 1));
+ entry = soc_dma_lookup(dma, virt_base);
+
+ if (dma->memmap_size) {
+ if (entry->type == soc_dma_port_mem) {
+ if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
+ (entry->addr <= virt_base &&
+ entry->addr + entry->u.mem.size > virt_base)) {
+ error_report("%s: RAM at %"PRIx64 "-%"PRIx64
+ " collides with RAM region at %"PRIx64
+ "-%"PRIx64, __func__,
+ virt_base, virt_base + size,
+ entry->addr, entry->addr + entry->u.mem.size);
+ exit(-1);
+ }
+
+ if (entry->addr <= virt_base)
+ entry ++;
+ } else {
+ if (entry->addr >= virt_base &&
+ entry->addr < virt_base + size) {
+ error_report("%s: RAM at %"PRIx64 "-%"PRIx64
+ " collides with FIFO at %"PRIx64,
+ __func__, virt_base, virt_base + size,
+ entry->addr);
+ exit(-1);
+ }
+
+ while (entry < dma->memmap + dma->memmap_size &&
+ entry->addr <= virt_base)
+ entry ++;
+ }
+
+ memmove(entry + 1, entry,
+ (uint8_t *) (dma->memmap + dma->memmap_size ++) -
+ (uint8_t *) entry);
+ } else
+ dma->memmap_size ++;
+
+ entry->addr = virt_base;
+ entry->type = soc_dma_port_mem;
+ entry->u.mem.base = phys_base;
+ entry->u.mem.size = size;
+}
+
+/* TODO: port removal for ports like PCMCIA memory */
diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c
new file mode 100644
index 000000000..03bc50087
--- /dev/null
+++ b/hw/dma/sparc32_dma.c
@@ -0,0 +1,449 @@
+/*
+ * QEMU Sparc32 DMA controller emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * Modifications:
+ * 2010-Feb-14 Artyom Tarasenko : reworked irq generation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sparc/sparc32_dma.h"
+#include "hw/sparc/sun4m_iommu.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+/*
+ * This is the DMA controller part of chip STP2000 (Master I/O), also
+ * produced as NCR89C100. See
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
+ * and
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/DMA2.txt
+ */
+
+#define DMA_SIZE (4 * sizeof(uint32_t))
+/* We need the mask because one instance of the device is not page
+ aligned (ledma, start address 0x0010) */
+#define DMA_MASK (DMA_SIZE - 1)
+/* OBP says 0x20 bytes for ledma, the extras are aliased to espdma */
+#define DMA_ETH_SIZE (8 * sizeof(uint32_t))
+#define DMA_MAX_REG_OFFSET (2 * DMA_SIZE - 1)
+
+#define DMA_VER 0xa0000000
+#define DMA_INTR 1
+#define DMA_INTREN 0x10
+#define DMA_WRITE_MEM 0x100
+#define DMA_EN 0x200
+#define DMA_LOADED 0x04000000
+#define DMA_DRAIN_FIFO 0x40
+#define DMA_RESET 0x80
+
+/* XXX SCSI and ethernet should have different read-only bit masks */
+#define DMA_CSR_RO_MASK 0xfe000007
+
+enum {
+ GPIO_RESET = 0,
+ GPIO_DMA,
+};
+
+/* Note: on sparc, the lance 16 bit bus is swapped */
+void ledma_memory_read(void *opaque, hwaddr addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ DMADeviceState *s = opaque;
+ IOMMUState *is = (IOMMUState *)s->iommu;
+ int i;
+
+ addr |= s->dmaregs[3];
+ trace_ledma_memory_read(addr, len);
+ if (do_bswap) {
+ dma_memory_read(&is->iommu_as, addr, buf, len);
+ } else {
+ addr &= ~1;
+ len &= ~1;
+ dma_memory_read(&is->iommu_as, addr, buf, len);
+ for (i = 0; i < len; i += 2) {
+ bswap16s((uint16_t *)(buf + i));
+ }
+ }
+}
+
+void ledma_memory_write(void *opaque, hwaddr addr,
+ uint8_t *buf, int len, int do_bswap)
+{
+ DMADeviceState *s = opaque;
+ IOMMUState *is = (IOMMUState *)s->iommu;
+ int l, i;
+ uint16_t tmp_buf[32];
+
+ addr |= s->dmaregs[3];
+ trace_ledma_memory_write(addr, len);
+ if (do_bswap) {
+ dma_memory_write(&is->iommu_as, addr, buf, len);
+ } else {
+ addr &= ~1;
+ len &= ~1;
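+ /* Byte-swap through a 64-byte bounce buffer, one chunk at a time. */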
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(tmp_buf))
+ l = sizeof(tmp_buf);
+ for (i = 0; i < l; i += 2) {
+ tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i));
+ }
+ dma_memory_write(&is->iommu_as, addr, tmp_buf, l);
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ }
+}
+
+static void dma_set_irq(void *opaque, int irq, int level)
+{
+ DMADeviceState *s = opaque;
+ if (level) {
+ s->dmaregs[0] |= DMA_INTR;
+ if (s->dmaregs[0] & DMA_INTREN) {
+ trace_sparc32_dma_set_irq_raise();
+ qemu_irq_raise(s->irq);
+ }
+ } else {
+ if (s->dmaregs[0] & DMA_INTR) {
+ s->dmaregs[0] &= ~DMA_INTR;
+ if (s->dmaregs[0] & DMA_INTREN) {
+ trace_sparc32_dma_set_irq_lower();
+ qemu_irq_lower(s->irq);
+ }
+ }
+ }
+}
+
+void espdma_memory_read(void *opaque, uint8_t *buf, int len)
+{
+ DMADeviceState *s = opaque;
+ IOMMUState *is = (IOMMUState *)s->iommu;
+
+ trace_espdma_memory_read(s->dmaregs[1], len);
+ dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len);
+ s->dmaregs[1] += len;
+}
+
+void espdma_memory_write(void *opaque, uint8_t *buf, int len)
+{
+ DMADeviceState *s = opaque;
+ IOMMUState *is = (IOMMUState *)s->iommu;
+
+ trace_espdma_memory_write(s->dmaregs[1], len);
+ dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len);
+ s->dmaregs[1] += len;
+}
+
+static uint64_t dma_mem_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ DMADeviceState *s = opaque;
+ uint32_t saddr;
+
+ saddr = (addr & DMA_MASK) >> 2;
+ trace_sparc32_dma_mem_readl(addr, s->dmaregs[saddr]);
+ return s->dmaregs[saddr];
+}
+
+static void dma_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ DMADeviceState *s = opaque;
+ uint32_t saddr;
+
+ saddr = (addr & DMA_MASK) >> 2;
+ trace_sparc32_dma_mem_writel(addr, s->dmaregs[saddr], val);
+ switch (saddr) {
+ case 0:
+ if (val & DMA_INTREN) {
+ if (s->dmaregs[0] & DMA_INTR) {
+ trace_sparc32_dma_set_irq_raise();
+ qemu_irq_raise(s->irq);
+ }
+ } else {
+ if (s->dmaregs[0] & (DMA_INTR | DMA_INTREN)) {
+ trace_sparc32_dma_set_irq_lower();
+ qemu_irq_lower(s->irq);
+ }
+ }
+ if (val & DMA_RESET) {
+ qemu_irq_raise(s->gpio[GPIO_RESET]);
+ qemu_irq_lower(s->gpio[GPIO_RESET]);
+ } else if (val & DMA_DRAIN_FIFO) {
+ val &= ~DMA_DRAIN_FIFO;
+ } else if (val == 0)
+ val = DMA_DRAIN_FIFO;
+
+ if (val & DMA_EN && !(s->dmaregs[0] & DMA_EN)) {
+ trace_sparc32_dma_enable_raise();
+ qemu_irq_raise(s->gpio[GPIO_DMA]);
+ } else if (!(val & DMA_EN) && !!(s->dmaregs[0] & DMA_EN)) {
+ trace_sparc32_dma_enable_lower();
+ qemu_irq_lower(s->gpio[GPIO_DMA]);
+ }
+
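+ /* Preserve the read-only CSR bits and force the version field. */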
+ val &= ~DMA_CSR_RO_MASK;
+ val |= DMA_VER;
+ s->dmaregs[0] = (s->dmaregs[0] & DMA_CSR_RO_MASK) | val;
+ break;
+ case 1:
+ s->dmaregs[0] |= DMA_LOADED;
+ /* fall through */
+ default:
+ s->dmaregs[saddr] = val;
+ break;
+ }
+}
+
+static const MemoryRegionOps dma_mem_ops = {
+ .read = dma_mem_read,
+ .write = dma_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void sparc32_dma_device_reset(DeviceState *d)
+{
+ DMADeviceState *s = SPARC32_DMA_DEVICE(d);
+
+ memset(s->dmaregs, 0, DMA_SIZE);
+ s->dmaregs[0] = DMA_VER;
+}
+
+static const VMStateDescription vmstate_sparc32_dma_device = {
+ .name ="sparc32_dma",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(dmaregs, DMADeviceState, DMA_REGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void sparc32_dma_device_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ DMADeviceState *s = SPARC32_DMA_DEVICE(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ sysbus_init_irq(sbd, &s->irq);
+
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ object_property_add_link(OBJECT(dev), "iommu", TYPE_SUN4M_IOMMU,
+ (Object **) &s->iommu,
+ qdev_prop_allow_set_link_before_realize,
+ 0);
+
+ qdev_init_gpio_in(dev, dma_set_irq, 1);
+ qdev_init_gpio_out(dev, s->gpio, 2);
+}
+
+static void sparc32_dma_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = sparc32_dma_device_reset;
+ dc->vmsd = &vmstate_sparc32_dma_device;
+}
+
+static const TypeInfo sparc32_dma_device_info = {
+ .name = TYPE_SPARC32_DMA_DEVICE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .abstract = true,
+ .instance_size = sizeof(DMADeviceState),
+ .instance_init = sparc32_dma_device_init,
+ .class_init = sparc32_dma_device_class_init,
+};
+
+static void sparc32_espdma_device_init(Object *obj)
+{
+ DMADeviceState *s = SPARC32_DMA_DEVICE(obj);
+ ESPDMADeviceState *es = SPARC32_ESPDMA_DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &dma_mem_ops, s,
+ "espdma-mmio", DMA_SIZE);
+
+ object_initialize_child(obj, "esp", &es->esp, TYPE_SYSBUS_ESP);
+}
+
+static void sparc32_espdma_device_realize(DeviceState *dev, Error **errp)
+{
+ ESPDMADeviceState *es = SPARC32_ESPDMA_DEVICE(dev);
+ SysBusESPState *sysbus = SYSBUS_ESP(&es->esp);
+ ESPState *esp = &sysbus->esp;
+
+ esp->dma_memory_read = espdma_memory_read;
+ esp->dma_memory_write = espdma_memory_write;
+ esp->dma_opaque = SPARC32_DMA_DEVICE(dev);
+ sysbus->it_shift = 2;
+ esp->dma_enabled = 1;
+ sysbus_realize(SYS_BUS_DEVICE(sysbus), &error_fatal);
+}
+
+static void sparc32_espdma_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sparc32_espdma_device_realize;
+}
+
+static const TypeInfo sparc32_espdma_device_info = {
+ .name = TYPE_SPARC32_ESPDMA_DEVICE,
+ .parent = TYPE_SPARC32_DMA_DEVICE,
+ .instance_size = sizeof(ESPDMADeviceState),
+ .instance_init = sparc32_espdma_device_init,
+ .class_init = sparc32_espdma_device_class_init,
+};
+
+static void sparc32_ledma_device_init(Object *obj)
+{
+ DMADeviceState *s = SPARC32_DMA_DEVICE(obj);
+ LEDMADeviceState *ls = SPARC32_LEDMA_DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &dma_mem_ops, s,
+ "ledma-mmio", DMA_SIZE);
+
+ object_initialize_child(obj, "lance", &ls->lance, TYPE_LANCE);
+}
+
+static void sparc32_ledma_device_realize(DeviceState *dev, Error **errp)
+{
+ LEDMADeviceState *s = SPARC32_LEDMA_DEVICE(dev);
+ SysBusPCNetState *lance = SYSBUS_PCNET(&s->lance);
+
+ object_property_set_link(OBJECT(lance), "dma", OBJECT(dev), &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(lance), &error_fatal);
+}
+
+static void sparc32_ledma_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sparc32_ledma_device_realize;
+}
+
+static const TypeInfo sparc32_ledma_device_info = {
+ .name = TYPE_SPARC32_LEDMA_DEVICE,
+ .parent = TYPE_SPARC32_DMA_DEVICE,
+ .instance_size = sizeof(LEDMADeviceState),
+ .instance_init = sparc32_ledma_device_init,
+ .class_init = sparc32_ledma_device_class_init,
+};
+
+static void sparc32_dma_realize(DeviceState *dev, Error **errp)
+{
+ SPARC32DMAState *s = SPARC32_DMA(dev);
+ DeviceState *espdma, *esp, *ledma, *lance;
+ SysBusDevice *sbd;
+ Object *iommu;
+
+ iommu = object_resolve_path_type("", TYPE_SUN4M_IOMMU, NULL);
+ if (!iommu) {
+ error_setg(errp, "unable to locate sun4m IOMMU device");
+ return;
+ }
+
+ espdma = DEVICE(&s->espdma);
+ object_property_set_link(OBJECT(espdma), "iommu", iommu, &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(espdma), &error_fatal);
+
+ esp = DEVICE(object_resolve_path_component(OBJECT(espdma), "esp"));
+ sbd = SYS_BUS_DEVICE(esp);
+ sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(espdma, 0));
+ qdev_connect_gpio_out(espdma, 0, qdev_get_gpio_in(esp, 0));
+ qdev_connect_gpio_out(espdma, 1, qdev_get_gpio_in(esp, 1));
+
+ sbd = SYS_BUS_DEVICE(espdma);
+ memory_region_add_subregion(&s->dmamem, 0x0,
+ sysbus_mmio_get_region(sbd, 0));
+
+ ledma = DEVICE(&s->ledma);
+ object_property_set_link(OBJECT(ledma), "iommu", iommu, &error_abort);
+ sysbus_realize(SYS_BUS_DEVICE(ledma), &error_fatal);
+
+ lance = DEVICE(object_resolve_path_component(OBJECT(ledma), "lance"));
+ sbd = SYS_BUS_DEVICE(lance);
+ sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(ledma, 0));
+ qdev_connect_gpio_out(ledma, 0, qdev_get_gpio_in(lance, 0));
+
+ sbd = SYS_BUS_DEVICE(ledma);
+ memory_region_add_subregion(&s->dmamem, 0x10,
+ sysbus_mmio_get_region(sbd, 0));
+
+ /* Add ledma alias to handle SunOS 5.7 - Solaris 9 invalid access bug */
+ memory_region_init_alias(&s->ledma_alias, OBJECT(dev), "ledma-alias",
+ sysbus_mmio_get_region(sbd, 0), 0x4, 0x4);
+ memory_region_add_subregion(&s->dmamem, 0x20, &s->ledma_alias);
+}
+
+static void sparc32_dma_init(Object *obj)
+{
+ SPARC32DMAState *s = SPARC32_DMA(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ memory_region_init(&s->dmamem, OBJECT(s), "dma", DMA_SIZE + DMA_ETH_SIZE);
+ sysbus_init_mmio(sbd, &s->dmamem);
+
+ object_initialize_child(obj, "espdma", &s->espdma,
+ TYPE_SPARC32_ESPDMA_DEVICE);
+ object_initialize_child(obj, "ledma", &s->ledma,
+ TYPE_SPARC32_LEDMA_DEVICE);
+}
+
+static void sparc32_dma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sparc32_dma_realize;
+}
+
+static const TypeInfo sparc32_dma_info = {
+ .name = TYPE_SPARC32_DMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SPARC32DMAState),
+ .instance_init = sparc32_dma_init,
+ .class_init = sparc32_dma_class_init,
+};
+
+
+static void sparc32_dma_register_types(void)
+{
+ type_register_static(&sparc32_dma_device_info);
+ type_register_static(&sparc32_espdma_device_info);
+ type_register_static(&sparc32_ledma_device_info);
+ type_register_static(&sparc32_dma_info);
+}
+
+type_init(sparc32_dma_register_types)
diff --git a/hw/dma/trace-events b/hw/dma/trace-events
new file mode 100644
index 000000000..3c47df54e
--- /dev/null
+++ b/hw/dma/trace-events
@@ -0,0 +1,46 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# rc4030.c
+jazzio_read(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x"
+jazzio_write(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x"
+rc4030_read(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x"
+rc4030_write(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x"
+
+# sparc32_dma.c
+ledma_memory_read(uint64_t addr, int len) "DMA read addr 0x%"PRIx64 " len %d"
+ledma_memory_write(uint64_t addr, int len) "DMA write addr 0x%"PRIx64 " len %d"
+sparc32_dma_set_irq_raise(void) "Raise IRQ"
+sparc32_dma_set_irq_lower(void) "Lower IRQ"
+espdma_memory_read(uint32_t addr, int len) "DMA read addr 0x%08x len %d"
+espdma_memory_write(uint32_t addr, int len) "DMA write addr 0x%08x len %d"
+sparc32_dma_mem_readl(uint64_t addr, uint32_t ret) "read dmareg 0x%"PRIx64": 0x%08x"
+sparc32_dma_mem_writel(uint64_t addr, uint32_t old, uint32_t val) "write dmareg 0x%"PRIx64": 0x%08x -> 0x%08x"
+sparc32_dma_enable_raise(void) "Raise DMA enable"
+sparc32_dma_enable_lower(void) "Lower DMA enable"
+
+# i8257.c
+i8257_unregistered_dma(int nchan, int dma_pos, int dma_len) "unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d"
+
+# pl330.c
+pl330_fault(void *ptr, uint32_t flags) "ch: %p, flags: 0x%"PRIx32
+pl330_fault_abort(void) "abort interrupt raised"
+pl330_dmaend(void) "DMA ending"
+pl330_dmago(void) "DMA run"
+pl330_dmald(uint8_t chan, uint32_t addr, uint32_t size, uint32_t num, char ch) "channel:%"PRId8" address:0x%08"PRIx32" size:0x%"PRIx32" num:%"PRId32"%c"
+pl330_dmakill(void) "abort interrupt lowered"
+pl330_dmalpend(uint8_t nf, uint8_t bs, uint8_t lc, uint8_t ch, uint8_t flag) "nf=0x%02x bs=0x%02x lc=0x%02x ch=0x%02x flag=0x%02x"
+pl330_dmalpiter(void) "loop reiteration"
+pl330_dmalpfallthrough(void) "loop fallthrough"
+pl330_dmasev_evirq(uint8_t ev_id) "event interrupt raised %"PRId8
+pl330_dmasev_event(uint8_t ev_id) "event raised %"PRId8
+pl330_dmast(uint8_t chan, uint32_t addr, uint32_t sz, uint32_t num, char ch) "channel:%"PRId8" address:0x%08"PRIx32" size:0x%"PRIx32" num:%"PRId32" %c"
+pl330_dmawfe(uint8_t ev_id) "event lowered 0x%"PRIx8
+pl330_chan_exec_undef(void) "undefined instruction"
+pl330_exec_cycle(uint32_t addr, uint32_t size) "PL330 read from memory @0x%08"PRIx32" (size = 0x%08"PRIx32")"
+pl330_hexdump(uint32_t offset, char *str) " 0x%04"PRIx32":%s"
+pl330_exec(void) "pl330_exec"
+pl330_debug_exec(uint8_t ch) "chan id: 0x%"PRIx8
+pl330_debug_exec_stall(void) "stall of debug instruction not implemented"
+pl330_iomem_write(uint32_t offset, uint32_t value) "addr: 0x%08"PRIx32" data: 0x%08"PRIx32
+pl330_iomem_write_clr(int i) "event interrupt lowered %d"
+pl330_iomem_read(uint32_t addr, uint32_t data) "addr: 0x%08"PRIx32" data: 0x%08"PRIx32
diff --git a/hw/dma/trace.h b/hw/dma/trace.h
new file mode 100644
index 000000000..4bcb28b47
--- /dev/null
+++ b/hw/dma/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_dma.h"
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
new file mode 100644
index 000000000..bc383f53c
--- /dev/null
+++ b/hw/dma/xilinx_axidma.c
@@ -0,0 +1,662 @@
+/*
+ * QEMU model of Xilinx AXI-DMA block.
+ *
+ * Copyright (c) 2011 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/ptimer.h"
+#include "hw/qdev-properties.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+#include "sysemu/dma.h"
+#include "hw/stream.h"
+#include "qom/object.h"
+
+#define D(x)
+
+#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
+#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
+#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XilinxAXIDMA, XILINX_AXI_DMA)
+
+typedef struct XilinxAXIDMAStreamSink XilinxAXIDMAStreamSink;
+DECLARE_INSTANCE_CHECKER(XilinxAXIDMAStreamSink, XILINX_AXI_DMA_DATA_STREAM,
+ TYPE_XILINX_AXI_DMA_DATA_STREAM)
+
+DECLARE_INSTANCE_CHECKER(XilinxAXIDMAStreamSink, XILINX_AXI_DMA_CONTROL_STREAM,
+ TYPE_XILINX_AXI_DMA_CONTROL_STREAM)
+
+#define R_DMACR (0x00 / 4)
+#define R_DMASR (0x04 / 4)
+#define R_CURDESC (0x08 / 4)
+#define R_TAILDESC (0x10 / 4)
+#define R_MAX (0x30 / 4)
+
+#define CONTROL_PAYLOAD_WORDS 5
+#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))
+
+
+enum {
+ DMACR_RUNSTOP = 1,
+ DMACR_TAILPTR_MODE = 2,
+ DMACR_RESET = 4
+};
+
+enum {
+ DMASR_HALTED = 1,
+ DMASR_IDLE = 2,
+ DMASR_IOC_IRQ = 1 << 12,
+ DMASR_DLY_IRQ = 1 << 13,
+
+ DMASR_IRQ_MASK = 7 << 12
+};
+
+struct SDesc {
+ uint64_t nxtdesc;
+ uint64_t buffer_address;
+ uint64_t reserved;
+ uint32_t control;
+ uint32_t status;
+ uint8_t app[CONTROL_PAYLOAD_SIZE];
+};
+
+enum {
+ SDESC_CTRL_EOF = (1 << 26),
+ SDESC_CTRL_SOF = (1 << 27),
+
+ SDESC_CTRL_LEN_MASK = (1 << 23) - 1
+};
+
+enum {
+ SDESC_STATUS_EOF = (1 << 26),
+ SDESC_STATUS_SOF_BIT = 27,
+ SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
+ SDESC_STATUS_COMPLETE = (1 << 31)
+};
+
+struct Stream {
+ struct XilinxAXIDMA *dma;
+ ptimer_state *ptimer;
+ qemu_irq irq;
+
+ int nr;
+
+ bool sof;
+ struct SDesc desc;
+ unsigned int complete_cnt;
+ uint32_t regs[R_MAX];
+ uint8_t app[20];
+ unsigned char txbuf[16 * 1024];
+};
+
+struct XilinxAXIDMAStreamSink {
+ Object parent;
+
+ struct XilinxAXIDMA *dma;
+};
+
+struct XilinxAXIDMA {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ MemoryRegion *dma_mr;
+ AddressSpace as;
+
+ uint32_t freqhz;
+ StreamSink *tx_data_dev;
+ StreamSink *tx_control_dev;
+ XilinxAXIDMAStreamSink rx_data_dev;
+ XilinxAXIDMAStreamSink rx_control_dev;
+
+ struct Stream streams[2];
+
+ StreamCanPushNotifyFn notify;
+ void *notify_opaque;
+};
+
+/*
+ * Helper calls to extract info from descriptors and other trivial
+ * state from regs.
+ */
+static inline int stream_desc_sof(struct SDesc *d)
+{
+ return d->control & SDESC_CTRL_SOF;
+}
+
+static inline int stream_desc_eof(struct SDesc *d)
+{
+ return d->control & SDESC_CTRL_EOF;
+}
+
+static inline int stream_resetting(struct Stream *s)
+{
+ return !!(s->regs[R_DMACR] & DMACR_RESET);
+}
+
+static inline int stream_running(struct Stream *s)
+{
+ return s->regs[R_DMACR] & DMACR_RUNSTOP;
+}
+
+static inline int stream_idle(struct Stream *s)
+{
+ return !!(s->regs[R_DMASR] & DMASR_IDLE);
+}
+
+static void stream_reset(struct Stream *s)
+{
+ s->regs[R_DMASR] = DMASR_HALTED; /* Starts up halted. */
+ s->regs[R_DMACR] = 1 << 16; /* Starts with a completion threshold of one. */
+ s->sof = true;
+}
+
+/* Map an offset addr into a channel index. */
+static inline int streamid_from_addr(hwaddr addr)
+{
+ int sid;
+
+ sid = addr / (0x30);
+ sid &= 1;
+ return sid;
+}
+
+static void stream_desc_load(struct Stream *s, hwaddr addr)
+{
+ struct SDesc *d = &s->desc;
+
+ address_space_read(&s->dma->as, addr, MEMTXATTRS_UNSPECIFIED, d, sizeof *d);
+
+ /* Convert from LE into host endianness. */
+ d->buffer_address = le64_to_cpu(d->buffer_address);
+ d->nxtdesc = le64_to_cpu(d->nxtdesc);
+ d->control = le32_to_cpu(d->control);
+ d->status = le32_to_cpu(d->status);
+}
+
+static void stream_desc_store(struct Stream *s, hwaddr addr)
+{
+ struct SDesc *d = &s->desc;
+
+ /* Convert from host endianness into LE. */
+ d->buffer_address = cpu_to_le64(d->buffer_address);
+ d->nxtdesc = cpu_to_le64(d->nxtdesc);
+ d->control = cpu_to_le32(d->control);
+ d->status = cpu_to_le32(d->status);
+ address_space_write(&s->dma->as, addr, MEMTXATTRS_UNSPECIFIED,
+ d, sizeof *d);
+}
+
+static void stream_update_irq(struct Stream *s)
+{
+ unsigned int pending, mask, irq;
+
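+ /*
+ * The IRQ enable bits in DMACR occupy the same bit positions as the
+ * IRQ status bits in DMASR, so one mask serves to extract both.
+ */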
+ pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
+ mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
+
+ irq = pending & mask;
+
+ qemu_set_irq(s->irq, !!irq);
+}
+
+static void stream_reload_complete_cnt(struct Stream *s)
+{
+ unsigned int comp_th;
+ comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
+ s->complete_cnt = comp_th;
+}
+
+static void timer_hit(void *opaque)
+{
+ struct Stream *s = opaque;
+
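+ /* The delay timer expired: reload the threshold and raise the delay IRQ. */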
+ stream_reload_complete_cnt(s);
+ s->regs[R_DMASR] |= DMASR_DLY_IRQ;
+ stream_update_irq(s);
+}
+
+static void stream_complete(struct Stream *s)
+{
+ unsigned int comp_delay;
+
+ /* Start the delayed timer. */
+ ptimer_transaction_begin(s->ptimer);
+ comp_delay = s->regs[R_DMACR] >> 24;
+ if (comp_delay) {
+ ptimer_stop(s->ptimer);
+ ptimer_set_count(s->ptimer, comp_delay);
+ ptimer_run(s->ptimer, 1);
+ }
+
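+ /*
+ * Count down the completion threshold (DMACR[23:16]); once it hits
+ * zero, raise the IOC interrupt and reload the counter.
+ */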
+ s->complete_cnt--;
+ if (s->complete_cnt == 0) {
+ /* Raise the IOC irq. */
+ s->regs[R_DMASR] |= DMASR_IOC_IRQ;
+ stream_reload_complete_cnt(s);
+ }
+ ptimer_transaction_commit(s->ptimer);
+}
+
+static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev,
+ StreamSink *tx_control_dev)
+{
+ uint32_t prev_d;
+ uint32_t txlen;
+ uint64_t addr;
+ bool eop;
+
+ if (!stream_running(s) || stream_idle(s)) {
+ return;
+ }
+
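+ /*
+ * Walk the TX descriptor chain: halt if a descriptor was already
+ * completed, go idle once the tail descriptor has been processed.
+ */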
+ while (1) {
+ stream_desc_load(s, s->regs[R_CURDESC]);
+
+ if (s->desc.status & SDESC_STATUS_COMPLETE) {
+ s->regs[R_DMASR] |= DMASR_HALTED;
+ break;
+ }
+
+ if (stream_desc_sof(&s->desc)) {
+ stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app), true);
+ }
+
+ txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
+
+ eop = stream_desc_eof(&s->desc);
+ addr = s->desc.buffer_address;
+ while (txlen) {
+ unsigned int len;
+
+ len = txlen > sizeof s->txbuf ? sizeof s->txbuf : txlen;
+ address_space_read(&s->dma->as, addr,
+ MEMTXATTRS_UNSPECIFIED,
+ s->txbuf, len);
+ stream_push(tx_data_dev, s->txbuf, len, eop && len == txlen);
+ txlen -= len;
+ addr += len;
+ }
+
+ if (eop) {
+ stream_complete(s);
+ }
+
+ /* Update the descriptor. */
+ s->desc.status = txlen | SDESC_STATUS_COMPLETE;
+ stream_desc_store(s, s->regs[R_CURDESC]);
+
+ /* Advance. */
+ prev_d = s->regs[R_CURDESC];
+ s->regs[R_CURDESC] = s->desc.nxtdesc;
+ if (prev_d == s->regs[R_TAILDESC]) {
+ s->regs[R_DMASR] |= DMASR_IDLE;
+ break;
+ }
+ }
+}
+
+static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
+ size_t len, bool eop)
+{
+ uint32_t prev_d;
+ unsigned int rxlen;
+ size_t pos = 0;
+
+ if (!stream_running(s) || stream_idle(s)) {
+ return 0;
+ }
+
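+ /*
+ * Scatter the incoming payload over the RX descriptor chain until it
+ * is consumed, a completed descriptor halts us, or we pass the tail
+ * descriptor.
+ */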
+ while (len) {
+ stream_desc_load(s, s->regs[R_CURDESC]);
+
+ if (s->desc.status & SDESC_STATUS_COMPLETE) {
+ s->regs[R_DMASR] |= DMASR_HALTED;
+ break;
+ }
+
+ rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
+ if (rxlen > len) {
+ /* The remaining payload fits in this descriptor's buffer. */
+ rxlen = len;
+ }
+
+ address_space_write(&s->dma->as, s->desc.buffer_address,
+ MEMTXATTRS_UNSPECIFIED, buf + pos, rxlen);
+ len -= rxlen;
+ pos += rxlen;
+
+ /* Update the descriptor. */
+ if (eop) {
+ stream_complete(s);
+ memcpy(s->desc.app, s->app, sizeof(s->desc.app));
+ s->desc.status |= SDESC_STATUS_EOF;
+ }
+
+ s->desc.status |= s->sof << SDESC_STATUS_SOF_BIT;
+ s->desc.status |= SDESC_STATUS_COMPLETE;
+ stream_desc_store(s, s->regs[R_CURDESC]);
+ s->sof = eop;
+
+ /* Advance. */
+ prev_d = s->regs[R_CURDESC];
+ s->regs[R_CURDESC] = s->desc.nxtdesc;
+ if (prev_d == s->regs[R_TAILDESC]) {
+ s->regs[R_DMASR] |= DMASR_IDLE;
+ break;
+ }
+ }
+
+ return pos;
+}
+
+static void xilinx_axidma_reset(DeviceState *dev)
+{
+ int i;
+ XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
+
+ for (i = 0; i < 2; i++) {
+ stream_reset(&s->streams[i]);
+ }
+}
+
+static size_t
+xilinx_axidma_control_stream_push(StreamSink *obj, unsigned char *buf,
+ size_t len, bool eop)
+{
+ XilinxAXIDMAStreamSink *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
+ struct Stream *s = &cs->dma->streams[1];
+
+ if (len != CONTROL_PAYLOAD_SIZE) {
+ hw_error("AXI DMA requires %d byte control stream payload\n",
+ (int)CONTROL_PAYLOAD_SIZE);
+ }
+
+ memcpy(s->app, buf, len);
+ return len;
+}
+
+static bool
+xilinx_axidma_data_stream_can_push(StreamSink *obj,
+ StreamCanPushNotifyFn notify,
+ void *notify_opaque)
+{
+ XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
+ struct Stream *s = &ds->dma->streams[1];
+
+ if (!stream_running(s) || stream_idle(s)) {
+ ds->dma->notify = notify;
+ ds->dma->notify_opaque = notify_opaque;
+ return false;
+ }
+
+ return true;
+}
+
+static size_t
+xilinx_axidma_data_stream_push(StreamSink *obj, unsigned char *buf, size_t len,
+ bool eop)
+{
+ XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
+ struct Stream *s = &ds->dma->streams[1];
+ size_t ret;
+
+ ret = stream_process_s2mem(s, buf, len, eop);
+ stream_update_irq(s);
+ return ret;
+}
+
+static uint64_t axidma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ XilinxAXIDMA *d = opaque;
+ struct Stream *s;
+ uint32_t r = 0;
+ int sid;
+
+ sid = streamid_from_addr(addr);
+ s = &d->streams[sid];
+
+ addr = addr % 0x30;
+ addr >>= 2;
+ switch (addr) {
+ case R_DMACR:
+ /* Simulate a one-cycle reset delay. */
+ s->regs[addr] &= ~DMACR_RESET;
+ r = s->regs[addr];
+ break;
+ case R_DMASR:
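+ /* Compose the dynamic fields: completion count in bits [23:16],
+ * delay timer count in bits [31:24]. */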
+ s->regs[addr] &= 0xffff;
+ s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
+ s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
+ r = s->regs[addr];
+ break;
+ default:
+ r = s->regs[addr];
+ D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+ __func__, sid, addr * 4, r));
+ break;
+ }
+ return r;
+}
+
+static void axidma_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ XilinxAXIDMA *d = opaque;
+ struct Stream *s;
+ int sid;
+
+ sid = streamid_from_addr(addr);
+ s = &d->streams[sid];
+
+ addr = addr % 0x30;
+ addr >>= 2;
+ switch (addr) {
+ case R_DMACR:
+ /* Tailptr mode is always on. */
+ value |= DMACR_TAILPTR_MODE;
+ /* Remember our previous reset state. */
+ value |= (s->regs[addr] & DMACR_RESET);
+ s->regs[addr] = value;
+
+ if (value & DMACR_RESET) {
+ stream_reset(s);
+ }
+
+ if ((value & DMACR_RUNSTOP) && !stream_resetting(s)) {
+ /* Start processing. */
+ s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
+ }
+ stream_reload_complete_cnt(s);
+ break;
+
+ case R_DMASR:
+ /* IRQ status bits are write-one-to-clear; drop them from the stored value. */
+ value &= ~(value & DMASR_IRQ_MASK);
+ s->regs[addr] = value;
+ break;
+
+ case R_TAILDESC:
+ s->regs[addr] = value;
+ s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
+ if (!sid) {
+ stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev);
+ }
+ break;
+ default:
+ D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
+ __func__, sid, addr * 4, (unsigned)value));
+ s->regs[addr] = value;
+ break;
+ }
+ if (sid == 1 && d->notify) {
+ StreamCanPushNotifyFn notifytmp = d->notify;
+ d->notify = NULL;
+ notifytmp(d->notify_opaque);
+ }
+ stream_update_irq(s);
+}
+
+static const MemoryRegionOps axidma_ops = {
+ .read = axidma_read,
+ .write = axidma_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
+{
+ XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
+ XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
+ XilinxAXIDMAStreamSink *cs = XILINX_AXI_DMA_CONTROL_STREAM(
+ &s->rx_control_dev);
+ int i;
+
+ object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
+ (Object **)&ds->dma,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_STRONG);
+ object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
+ (Object **)&cs->dma,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_STRONG);
+ object_property_set_link(OBJECT(ds), "dma", OBJECT(s), &error_abort);
+ object_property_set_link(OBJECT(cs), "dma", OBJECT(s), &error_abort);
+
+ for (i = 0; i < 2; i++) {
+ struct Stream *st = &s->streams[i];
+
+ st->dma = s;
+ st->nr = i;
+ st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
+ ptimer_transaction_begin(st->ptimer);
+ ptimer_set_freq(st->ptimer, s->freqhz);
+ ptimer_transaction_commit(st->ptimer);
+ }
+
+ address_space_init(&s->as,
+ s->dma_mr ? s->dma_mr : get_system_memory(), "dma");
+}
+
+static void xilinx_axidma_init(Object *obj)
+{
+ XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ object_initialize_child(OBJECT(s), "axistream-connected-target",
+ &s->rx_data_dev, TYPE_XILINX_AXI_DMA_DATA_STREAM);
+ object_initialize_child(OBJECT(s), "axistream-control-connected-target",
+ &s->rx_control_dev,
+ TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
+ object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
+ (Object **)&s->dma_mr,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+
+ sysbus_init_irq(sbd, &s->streams[0].irq);
+ sysbus_init_irq(sbd, &s->streams[1].irq);
+
+ memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
+ "xlnx.axi-dma", R_MAX * 4 * 2);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static Property axidma_properties[] = {
+ DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
+ DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA,
+ tx_data_dev, TYPE_STREAM_SINK, StreamSink *),
+ DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIDMA,
+ tx_control_dev, TYPE_STREAM_SINK, StreamSink *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void axidma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = xilinx_axidma_realize;
+ dc->reset = xilinx_axidma_reset;
+ device_class_set_props(dc, axidma_properties);
+}
+
+static StreamSinkClass xilinx_axidma_data_stream_class = {
+ .push = xilinx_axidma_data_stream_push,
+ .can_push = xilinx_axidma_data_stream_can_push,
+};
+
+static StreamSinkClass xilinx_axidma_control_stream_class = {
+ .push = xilinx_axidma_control_stream_push,
+};
+
+static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
+{
+ StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
+
+ ssc->push = ((StreamSinkClass *)data)->push;
+ ssc->can_push = ((StreamSinkClass *)data)->can_push;
+}
+
+static const TypeInfo axidma_info = {
+ .name = TYPE_XILINX_AXI_DMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XilinxAXIDMA),
+ .class_init = axidma_class_init,
+ .instance_init = xilinx_axidma_init,
+};
+
+static const TypeInfo xilinx_axidma_data_stream_info = {
+ .name = TYPE_XILINX_AXI_DMA_DATA_STREAM,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(XilinxAXIDMAStreamSink),
+ .class_init = xilinx_axidma_stream_class_init,
+ .class_data = &xilinx_axidma_data_stream_class,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_STREAM_SINK },
+ { }
+ }
+};
+
+static const TypeInfo xilinx_axidma_control_stream_info = {
+ .name = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(XilinxAXIDMAStreamSink),
+ .class_init = xilinx_axidma_stream_class_init,
+ .class_data = &xilinx_axidma_control_stream_class,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_STREAM_SINK },
+ { }
+ }
+};
+
+static void xilinx_axidma_register_types(void)
+{
+ type_register_static(&axidma_info);
+ type_register_static(&xilinx_axidma_data_stream_info);
+ type_register_static(&xilinx_axidma_control_stream_info);
+}
+
+type_init(xilinx_axidma_register_types)
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
new file mode 100644
index 000000000..a5a92b4ff
--- /dev/null
+++ b/hw/dma/xlnx-zdma.c
@@ -0,0 +1,847 @@
+/*
+ * QEMU model of the ZynqMP generic DMA
+ *
+ * Copyright (c) 2014 Xilinx Inc.
+ * Copyright (c) 2018 FEIMTECH AB
+ *
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
+ * Francisco Iglesias <francisco.iglesias@feimtech.se>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/dma/xlnx-zdma.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+
+#ifndef XLNX_ZDMA_ERR_DEBUG
+#define XLNX_ZDMA_ERR_DEBUG 0
+#endif
+
+REG32(ZDMA_ERR_CTRL, 0x0)
+ FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
+REG32(ZDMA_CH_ISR, 0x100)
+ FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
+ FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
+ FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
+ FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
+ FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
+ FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
+ FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
+ FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
+ FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
+ FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
+ FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
+ FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
+REG32(ZDMA_CH_IMR, 0x104)
+ FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
+ FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
+ FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
+ FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
+ FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
+ FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
+ FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
+ FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
+ FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
+ FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
+ FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
+ FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
+REG32(ZDMA_CH_IEN, 0x108)
+ FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
+ FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
+ FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
+ FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
+ FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
+ FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
+ FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
+ FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
+ FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
+ FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
+ FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
+ FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
+REG32(ZDMA_CH_IDS, 0x10c)
+ FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
+ FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
+ FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
+ FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
+ FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
+ FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
+ FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
+ FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
+ FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
+ FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
+ FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
+ FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
+REG32(ZDMA_CH_CTRL0, 0x110)
+ FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
+ FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
+ FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
+ FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
+ FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
+ FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
+REG32(ZDMA_CH_CTRL1, 0x114)
+ FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
+ FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
+REG32(ZDMA_CH_FCI, 0x118)
+ FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
+ FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
+ FIELD(ZDMA_CH_FCI, EN, 0, 1)
+REG32(ZDMA_CH_STATUS, 0x11c)
+ FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
+REG32(ZDMA_CH_DATA_ATTR, 0x120)
+ FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
+ FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
+ FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
+ FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
+ FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
+ FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
+ FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
+ FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
+REG32(ZDMA_CH_DSCR_ATTR, 0x124)
+ FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
+ FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
+ FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
+REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
+REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
+ FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
+REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
+ FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
+REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
+ FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
+ FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
+ FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
+ FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
+REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
+REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
+ FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
+REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
+ FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
+REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
+ FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
+ FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
+ FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
+REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
+REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
+REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
+REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
+REG32(ZDMA_CH_SRC_START_LSB, 0x158)
+REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
+ FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
+REG32(ZDMA_CH_DST_START_LSB, 0x160)
+REG32(ZDMA_CH_DST_START_MSB, 0x164)
+ FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
+REG32(ZDMA_CH_RATE_CTRL, 0x18c)
+ FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
+REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
+REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
+ FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
+REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
+REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
+ FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
+REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
+REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
+ FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
+REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
+REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
+ FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
+REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
+REG32(ZDMA_CH_RATE_CNTL, 0x18c)
+ FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
+REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
+ FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
+REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
+ FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
+REG32(ZDMA_CH_DBG0, 0x198)
+ FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
+REG32(ZDMA_CH_DBG1, 0x19c)
+ FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
+REG32(ZDMA_CH_CTRL2, 0x200)
+ FIELD(ZDMA_CH_CTRL2, EN, 0, 1)
+
+enum {
+ PT_REG = 0,
+ PT_MEM = 1,
+};
+
+enum {
+ CMD_HALT = 1,
+ CMD_STOP = 2,
+};
+
+enum {
+ RW_MODE_RW = 0,
+ RW_MODE_WO = 1,
+ RW_MODE_RO = 2,
+};
+
+enum {
+ DTYPE_LINEAR = 0,
+ DTYPE_LINKED = 1,
+};
+
+enum {
+ AXI_BURST_FIXED = 0,
+ AXI_BURST_INCR = 1,
+};
+
+static void zdma_ch_imr_update_irq(XlnxZDMA *s)
+{
+ bool pending;
+
+ pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];
+
+ qemu_set_irq(s->irq_zdma_ch_imr, pending);
+}
+
+static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
+ zdma_ch_imr_update_irq(s);
+}
+
+static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
+ uint32_t val = val64;
+
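+ /* A write of 1 to an IEN bit clears the matching mask bit,
+ * unmasking that interrupt. The register itself reads as zero. */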
+ s->regs[R_ZDMA_CH_IMR] &= ~val;
+ zdma_ch_imr_update_irq(s);
+ return 0;
+}
+
+static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
+ uint32_t val = val64;
+
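+ /* IDS is the inverse of IEN: a write of 1 sets the matching mask
+ * bit, disabling that interrupt. */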
+ s->regs[R_ZDMA_CH_IMR] |= val;
+ zdma_ch_imr_update_irq(s);
+ return 0;
+}
+
+static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
+{
+ s->state = state;
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);
+
+ /* Signal error if we have an error condition. */
+ if (s->error) {
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
+ }
+}
+
+static void zdma_src_done(XlnxZDMA *s)
+{
+ unsigned int cnt;
+ cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
+ cnt++;
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);
+
+ /* Did we overflow? */
+ if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
+ }
+ zdma_ch_imr_update_irq(s);
+}
+
+static void zdma_dst_done(XlnxZDMA *s)
+{
+ unsigned int cnt;
+ cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
+ cnt++;
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);
+
+ /* Did we overflow? */
+ if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
+ }
+ zdma_ch_imr_update_irq(s);
+}
+
+static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
+{
+ uint64_t addr;
+
+ addr = s->regs[basereg + 1];
+ addr <<= 32;
+ addr |= s->regs[basereg];
+
+ return addr;
+}
+
+static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
+{
+ s->regs[basereg] = addr;
+ s->regs[basereg + 1] = addr >> 32;
+}
+
+static void zdma_load_descriptor_reg(XlnxZDMA *s, unsigned int reg,
+ XlnxZDMADescr *descr)
+{
+ descr->addr = zdma_get_regaddr64(s, reg);
+ descr->size = s->regs[reg + 2];
+ descr->attr = s->regs[reg + 3];
+}
+
+static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr,
+ XlnxZDMADescr *descr)
+{
+ /* ZDMA descriptors must be aligned to their own size. */
+ if (addr % sizeof(XlnxZDMADescr)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "zdma: unaligned descriptor at %" PRIx64,
+ addr);
+ memset(descr, 0x0, sizeof(XlnxZDMADescr));
+ s->error = true;
+ return false;
+ }
+
+ descr->addr = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL);
+ descr->size = address_space_ldl_le(&s->dma_as, addr + 8, s->attr, NULL);
+ descr->attr = address_space_ldl_le(&s->dma_as, addr + 12, s->attr, NULL);
+ return true;
+}
+
+static void zdma_load_src_descriptor(XlnxZDMA *s)
+{
+ uint64_t src_addr;
+ unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
+
+ if (ptype == PT_REG) {
+ zdma_load_descriptor_reg(s, R_ZDMA_CH_SRC_DSCR_WORD0, &s->dsc_src);
+ return;
+ }
+
+ src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
+
+ if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
+ }
+}
+
+static void zdma_update_descr_addr(XlnxZDMA *s, bool type,
+ unsigned int basereg)
+{
+ uint64_t addr, next;
+
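+ /*
+ * LINEAR descriptors sit back to back in memory; LINKED descriptors
+ * store a pointer to the next descriptor right after themselves.
+ */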
+ if (type == DTYPE_LINEAR) {
+ addr = zdma_get_regaddr64(s, basereg);
+ next = addr + sizeof(s->dsc_dst);
+ } else {
+ addr = zdma_get_regaddr64(s, basereg);
+ addr += sizeof(s->dsc_dst);
+ next = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL);
+ }
+
+ zdma_put_regaddr64(s, basereg, next);
+}
+
+static void zdma_load_dst_descriptor(XlnxZDMA *s)
+{
+ uint64_t dst_addr;
+ unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
+ bool dst_type;
+
+ if (ptype == PT_REG) {
+ zdma_load_descriptor_reg(s, R_ZDMA_CH_DST_DSCR_WORD0, &s->dsc_dst);
+ return;
+ }
+
+ dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);
+
+ if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
+ }
+
+ /* Advance the descriptor pointer. */
+ dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, TYPE);
+ zdma_update_descr_addr(s, dst_type, R_ZDMA_CH_DST_CUR_DSCR_LSB);
+}
+
+static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
+{
+ uint32_t dst_size, dlen;
+ bool dst_intr;
+ unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
+ unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
+ unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
+ AWBURST);
+
+ /* FIXED burst types are only supported in simple dma mode. */
+ if (ptype != PT_REG) {
+ burst_type = AXI_BURST_INCR;
+ }
+
+ while (len) {
+ dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
+ SIZE);
+ if (dst_size == 0 && ptype == PT_MEM) {
+ zdma_load_dst_descriptor(s);
+ dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
+ SIZE);
+ }
+
+ /* Match what hardware does by ignoring the dst_size and only using
+ * the src size for Simple register mode. */
+ if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
+ dst_size = len;
+ }
+
+ dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
+ INTR);
+
+ dlen = len > dst_size ? dst_size : len;
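+ /* A FIXED burst rewrites the same address, so each write is capped
+ * at one bus beat. */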
+ if (burst_type == AXI_BURST_FIXED) {
+ if (dlen > (s->cfg.bus_width / 8)) {
+ dlen = s->cfg.bus_width / 8;
+ }
+ }
+
+ address_space_write(&s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen);
+ if (burst_type == AXI_BURST_INCR) {
+ s->dsc_dst.addr += dlen;
+ }
+ dst_size -= dlen;
+ buf += dlen;
+ len -= dlen;
+
+ if (dst_size == 0 && dst_intr) {
+ zdma_dst_done(s);
+ }
+
+ /* Write back to buffered descriptor. */
+ s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
+ ZDMA_CH_DST_DSCR_WORD2,
+ SIZE,
+ dst_size);
+ }
+}
+
+static void zdma_process_descr(XlnxZDMA *s)
+{
+ uint64_t src_addr;
+ uint32_t src_size, len;
+ unsigned int src_cmd;
+ bool src_intr, src_type;
+ unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
+ unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
+ unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
+ ARBURST);
+
+ src_addr = s->dsc_src.addr;
+ src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
+ src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
+ src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
+ src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);
+
+ /* FIXED burst types and non-rw modes are only supported in
+ * simple dma mode.
+ */
+ if (ptype != PT_REG) {
+ if (rw_mode != RW_MODE_RW) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "zDMA: rw-mode=%d but not simple DMA mode.\n",
+ rw_mode);
+ }
+ if (burst_type != AXI_BURST_INCR) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "zDMA: burst_type=%d but not simple DMA mode.\n",
+ burst_type);
+ }
+ burst_type = AXI_BURST_INCR;
+ rw_mode = RW_MODE_RW;
+ }
+
+ if (rw_mode == RW_MODE_WO) {
+ /* In Simple DMA Write-Only, we need to push DST size bytes
+ * regardless of what SRC size is set to. */
+ src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
+ SIZE);
+ memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
+ }
+
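+ /* Pump the source data through the internal buffer in chunks. */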
+ while (src_size) {
+ len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
+ if (burst_type == AXI_BURST_FIXED) {
+ if (len > (s->cfg.bus_width / 8)) {
+ len = s->cfg.bus_width / 8;
+ }
+ }
+
+ if (rw_mode == RW_MODE_WO) {
+ if (len > s->cfg.bus_width / 8) {
+ len = s->cfg.bus_width / 8;
+ }
+ } else {
+ address_space_read(&s->dma_as, src_addr, s->attr, s->buf, len);
+ if (burst_type == AXI_BURST_INCR) {
+ src_addr += len;
+ }
+ }
+
+ if (rw_mode != RW_MODE_RO) {
+ zdma_write_dst(s, s->buf, len);
+ }
+
+ s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
+ src_size -= len;
+ }
+
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);
+
+ if (src_intr) {
+ zdma_src_done(s);
+ }
+
+ if (ptype == PT_REG || src_cmd == CMD_STOP) {
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
+ zdma_set_state(s, DISABLED);
+ }
+
+ if (src_cmd == CMD_HALT) {
+ zdma_set_state(s, PAUSED);
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, false);
+ zdma_ch_imr_update_irq(s);
+ return;
+ }
+
+ zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
+}
+
+static void zdma_run(XlnxZDMA *s)
+{
+ while (s->state == ENABLED && !s->error) {
+ zdma_load_src_descriptor(s);
+
+ if (s->error) {
+ zdma_set_state(s, DISABLED);
+ } else {
+ zdma_process_descr(s);
+ }
+ }
+
+ zdma_ch_imr_update_irq(s);
+}
+
+static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
+{
+ uint64_t src_addr, dst_addr;
+
+ src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
+ zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
+ dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
+ zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
+ zdma_load_dst_descriptor(s);
+}
+
+static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
+
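+ /*
+ * Setting EN starts a disabled channel from the START registers or
+ * resumes a paused one (from the start or from the next descriptor,
+ * depending on CONT_ADDR); clearing EN cancels a pause.
+ */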
+ if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
+ s->error = false;
+
+ if (s->state == PAUSED &&
+ ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
+ if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
+ zdma_update_descr_addr_from_start(s);
+ } else {
+ bool src_type = FIELD_EX32(s->dsc_src.words[3],
+ ZDMA_CH_SRC_DSCR_WORD3, TYPE);
+ zdma_update_descr_addr(s, src_type,
+ R_ZDMA_CH_SRC_CUR_DSCR_LSB);
+ }
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
+ zdma_set_state(s, ENABLED);
+ } else if (s->state == DISABLED) {
+ zdma_update_descr_addr_from_start(s);
+ zdma_set_state(s, ENABLED);
+ }
+ } else {
+ /* Leave Paused state? */
+ if (s->state == PAUSED &&
+ ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
+ zdma_set_state(s, DISABLED);
+ }
+ }
+
+ zdma_run(s);
+}
+
+static const RegisterAccessInfo zdma_regs_info[] = {
+ { .name = "ZDMA_ERR_CTRL", .addr = A_ZDMA_ERR_CTRL,
+ .rsvd = 0xfffffffe,
+ },{ .name = "ZDMA_CH_ISR", .addr = A_ZDMA_CH_ISR,
+ .rsvd = 0xfffff000,
+ .w1c = 0xfff,
+ .post_write = zdma_ch_isr_postw,
+ },{ .name = "ZDMA_CH_IMR", .addr = A_ZDMA_CH_IMR,
+ .reset = 0xfff,
+ .rsvd = 0xfffff000,
+ .ro = 0xfff,
+ },{ .name = "ZDMA_CH_IEN", .addr = A_ZDMA_CH_IEN,
+ .rsvd = 0xfffff000,
+ .pre_write = zdma_ch_ien_prew,
+ },{ .name = "ZDMA_CH_IDS", .addr = A_ZDMA_CH_IDS,
+ .rsvd = 0xfffff000,
+ .pre_write = zdma_ch_ids_prew,
+ },{ .name = "ZDMA_CH_CTRL0", .addr = A_ZDMA_CH_CTRL0,
+ .reset = 0x80,
+ .rsvd = 0xffffff01,
+ .post_write = zdma_ch_ctrlx_postw,
+ },{ .name = "ZDMA_CH_CTRL1", .addr = A_ZDMA_CH_CTRL1,
+ .reset = 0x3ff,
+ .rsvd = 0xfffffc00,
+ },{ .name = "ZDMA_CH_FCI", .addr = A_ZDMA_CH_FCI,
+ .rsvd = 0xffffffc0,
+ },{ .name = "ZDMA_CH_STATUS", .addr = A_ZDMA_CH_STATUS,
+ .rsvd = 0xfffffffc,
+ .ro = 0x3,
+ },{ .name = "ZDMA_CH_DATA_ATTR", .addr = A_ZDMA_CH_DATA_ATTR,
+ .reset = 0x483d20f,
+ .rsvd = 0xf0000000,
+ },{ .name = "ZDMA_CH_DSCR_ATTR", .addr = A_ZDMA_CH_DSCR_ATTR,
+ .rsvd = 0xfffffe00,
+ },{ .name = "ZDMA_CH_SRC_DSCR_WORD0", .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
+ },{ .name = "ZDMA_CH_SRC_DSCR_WORD1", .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
+ .rsvd = 0xfffe0000,
+ },{ .name = "ZDMA_CH_SRC_DSCR_WORD2", .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
+ .rsvd = 0xc0000000,
+ },{ .name = "ZDMA_CH_SRC_DSCR_WORD3", .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
+ .rsvd = 0xffffffe0,
+ },{ .name = "ZDMA_CH_DST_DSCR_WORD0", .addr = A_ZDMA_CH_DST_DSCR_WORD0,
+ },{ .name = "ZDMA_CH_DST_DSCR_WORD1", .addr = A_ZDMA_CH_DST_DSCR_WORD1,
+ .rsvd = 0xfffe0000,
+ },{ .name = "ZDMA_CH_DST_DSCR_WORD2", .addr = A_ZDMA_CH_DST_DSCR_WORD2,
+ .rsvd = 0xc0000000,
+ },{ .name = "ZDMA_CH_DST_DSCR_WORD3", .addr = A_ZDMA_CH_DST_DSCR_WORD3,
+ .rsvd = 0xfffffffa,
+ },{ .name = "ZDMA_CH_WR_ONLY_WORD0", .addr = A_ZDMA_CH_WR_ONLY_WORD0,
+ },{ .name = "ZDMA_CH_WR_ONLY_WORD1", .addr = A_ZDMA_CH_WR_ONLY_WORD1,
+ },{ .name = "ZDMA_CH_WR_ONLY_WORD2", .addr = A_ZDMA_CH_WR_ONLY_WORD2,
+ },{ .name = "ZDMA_CH_WR_ONLY_WORD3", .addr = A_ZDMA_CH_WR_ONLY_WORD3,
+ },{ .name = "ZDMA_CH_SRC_START_LSB", .addr = A_ZDMA_CH_SRC_START_LSB,
+ },{ .name = "ZDMA_CH_SRC_START_MSB", .addr = A_ZDMA_CH_SRC_START_MSB,
+ .rsvd = 0xfffe0000,
+ },{ .name = "ZDMA_CH_DST_START_LSB", .addr = A_ZDMA_CH_DST_START_LSB,
+ },{ .name = "ZDMA_CH_DST_START_MSB", .addr = A_ZDMA_CH_DST_START_MSB,
+ .rsvd = 0xfffe0000,
+ },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB", .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
+ .ro = 0xffffffff,
+ },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB", .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
+ .rsvd = 0xfffe0000,
+ .ro = 0x1ffff,
+ },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB", .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
+ .ro = 0xffffffff,
+ },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB", .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
+ .rsvd = 0xfffe0000,
+ .ro = 0x1ffff,
+ },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB", .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
+ .ro = 0xffffffff,
+ },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB", .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
+ .rsvd = 0xfffe0000,
+ .ro = 0x1ffff,
+ },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB", .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
+ .ro = 0xffffffff,
+ },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB", .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
+ .rsvd = 0xfffe0000,
+ .ro = 0x1ffff,
+ },{ .name = "ZDMA_CH_TOTAL_BYTE", .addr = A_ZDMA_CH_TOTAL_BYTE,
+ .w1c = 0xffffffff,
+ },{ .name = "ZDMA_CH_RATE_CNTL", .addr = A_ZDMA_CH_RATE_CNTL,
+ .rsvd = 0xfffff000,
+ },{ .name = "ZDMA_CH_IRQ_SRC_ACCT", .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
+ .rsvd = 0xffffff00,
+ .ro = 0xff,
+ .cor = 0xff,
+ },{ .name = "ZDMA_CH_IRQ_DST_ACCT", .addr = A_ZDMA_CH_IRQ_DST_ACCT,
+ .rsvd = 0xffffff00,
+ .ro = 0xff,
+ .cor = 0xff,
+ },{ .name = "ZDMA_CH_DBG0", .addr = A_ZDMA_CH_DBG0,
+ .rsvd = 0xfffffe00,
+ .ro = 0x1ff,
+
+ /*
+ * There's SW out there that will check the debug regs for free space.
+ * Claim that we always have 0x100 free.
+ */
+ .reset = 0x100
+ },{ .name = "ZDMA_CH_DBG1", .addr = A_ZDMA_CH_DBG1,
+ .rsvd = 0xfffffe00,
+ .ro = 0x1ff,
+ },{ .name = "ZDMA_CH_CTRL2", .addr = A_ZDMA_CH_CTRL2,
+ .rsvd = 0xfffffffe,
+ .post_write = zdma_ch_ctrlx_postw,
+ }
+};
+
+static void zdma_reset(DeviceState *dev)
+{
+ XlnxZDMA *s = XLNX_ZDMA(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+
+ zdma_ch_imr_update_irq(s);
+}
+
+static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
+{
+ XlnxZDMA *s = XLNX_ZDMA(opaque);
+ RegisterInfo *r = &s->regs_info[addr / 4];
+
+ if (!r->data) {
+ char *path = object_get_canonical_path(OBJECT(s));
+ qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
+ path,
+ addr);
+ g_free(path);
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
+ zdma_ch_imr_update_irq(s);
+ return 0;
+ }
+ return register_read(r, ~0, NULL, false);
+}
+
+static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ XlnxZDMA *s = XLNX_ZDMA(opaque);
+ RegisterInfo *r = &s->regs_info[addr / 4];
+
+ if (!r->data) {
+ char *path = object_get_canonical_path(OBJECT(s));
+ qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
+ path,
+ addr, value);
+ g_free(path);
+ ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
+ zdma_ch_imr_update_irq(s);
+ return;
+ }
+ register_write(r, value, ~0, NULL, false);
+}
+
+static const MemoryRegionOps zdma_ops = {
+ .read = zdma_read,
+ .write = zdma_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void zdma_realize(DeviceState *dev, Error **errp)
+{
+ XlnxZDMA *s = XLNX_ZDMA(dev);
+ unsigned int i;
+
+ if (!s->dma_mr) {
+ error_setg(errp, TYPE_XLNX_ZDMA " 'dma' link not set");
+ return;
+ }
+ address_space_init(&s->dma_as, s->dma_mr, "zdma-dma");
+
+ for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
+ RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];
+
+ *r = (RegisterInfo) {
+ .data = (uint8_t *)&s->regs[
+ zdma_regs_info[i].addr / 4],
+ .data_size = sizeof(uint32_t),
+ .access = &zdma_regs_info[i],
+ .opaque = s,
+ };
+ }
+
+ s->attr = MEMTXATTRS_UNSPECIFIED;
+}
+
+static void zdma_init(Object *obj)
+{
+ XlnxZDMA *s = XLNX_ZDMA(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
+ TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);
+
+ object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
+ (Object **)&s->dma_mr,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+}
+
+static const VMStateDescription vmstate_zdma = {
+ .name = TYPE_XLNX_ZDMA,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
+ VMSTATE_UINT32(state, XlnxZDMA),
+ VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
+ VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static Property zdma_props[] = {
+ DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void zdma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = zdma_reset;
+ dc->realize = zdma_realize;
+ device_class_set_props(dc, zdma_props);
+ dc->vmsd = &vmstate_zdma;
+}
+
+static const TypeInfo zdma_info = {
+ .name = TYPE_XLNX_ZDMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxZDMA),
+ .class_init = zdma_class_init,
+ .instance_init = zdma_init,
+};
+
+static void zdma_register_types(void)
+{
+ type_register_static(&zdma_info);
+}
+
+type_init(zdma_register_types)
diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c
new file mode 100644
index 000000000..e33112b6f
--- /dev/null
+++ b/hw/dma/xlnx-zynq-devcfg.c
@@ -0,0 +1,402 @@
+/*
+ * QEMU model of the Xilinx Zynq Devcfg Interface
+ *
+ * (C) 2011 PetaLogix Pty Ltd
+ * (C) 2014 Xilinx Inc.
+ * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/dma/xlnx-zynq-devcfg.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "qemu/bitops.h"
+#include "sysemu/dma.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+#define FREQ_HZ 900000000
+
+#define BTT_MAX 0x400
+
+#ifndef XLNX_ZYNQ_DEVCFG_ERR_DEBUG
+#define XLNX_ZYNQ_DEVCFG_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT(fmt, args...) do { \
+ if (XLNX_ZYNQ_DEVCFG_ERR_DEBUG) { \
+ qemu_log("%s: " fmt, __func__, ## args); \
+ } \
+} while (0)
+
+REG32(CTRL, 0x00)
+ FIELD(CTRL, FORCE_RST, 31, 1) /* Not supported, wr ignored */
+ FIELD(CTRL, PCAP_PR, 27, 1) /* Forced to 0 on bad unlock */
+ FIELD(CTRL, PCAP_MODE, 26, 1)
+ FIELD(CTRL, MULTIBOOT_EN, 24, 1)
+ FIELD(CTRL, USER_MODE, 15, 1)
+ FIELD(CTRL, PCFG_AES_FUSE, 12, 1)
+ FIELD(CTRL, PCFG_AES_EN, 9, 3)
+ FIELD(CTRL, SEU_EN, 8, 1)
+ FIELD(CTRL, SEC_EN, 7, 1)
+ FIELD(CTRL, SPNIDEN, 6, 1)
+ FIELD(CTRL, SPIDEN, 5, 1)
+ FIELD(CTRL, NIDEN, 4, 1)
+ FIELD(CTRL, DBGEN, 3, 1)
+ FIELD(CTRL, DAP_EN, 0, 3)
+
+REG32(LOCK, 0x04)
+#define AES_FUSE_LOCK 4
+#define AES_EN_LOCK 3
+#define SEU_LOCK 2
+#define SEC_LOCK 1
+#define DBG_LOCK 0
+
+/* mapping bits in R_LOCK to what they lock in R_CTRL */
+static const uint32_t lock_ctrl_map[] = {
+ [AES_FUSE_LOCK] = R_CTRL_PCFG_AES_FUSE_MASK,
+ [AES_EN_LOCK] = R_CTRL_PCFG_AES_EN_MASK,
+ [SEU_LOCK] = R_CTRL_SEU_EN_MASK,
+ [SEC_LOCK] = R_CTRL_SEC_EN_MASK,
+ [DBG_LOCK] = R_CTRL_SPNIDEN_MASK | R_CTRL_SPIDEN_MASK |
+ R_CTRL_NIDEN_MASK | R_CTRL_DBGEN_MASK |
+ R_CTRL_DAP_EN_MASK,
+};
+
+REG32(CFG, 0x08)
+ FIELD(CFG, RFIFO_TH, 10, 2)
+ FIELD(CFG, WFIFO_TH, 8, 2)
+ FIELD(CFG, RCLK_EDGE, 7, 1)
+ FIELD(CFG, WCLK_EDGE, 6, 1)
+ FIELD(CFG, DISABLE_SRC_INC, 5, 1)
+ FIELD(CFG, DISABLE_DST_INC, 4, 1)
+#define R_CFG_RESET 0x50B
+
+REG32(INT_STS, 0x0C)
+ FIELD(INT_STS, PSS_GTS_USR_B, 31, 1)
+ FIELD(INT_STS, PSS_FST_CFG_B, 30, 1)
+ FIELD(INT_STS, PSS_CFG_RESET_B, 27, 1)
+ FIELD(INT_STS, RX_FIFO_OV, 18, 1)
+ FIELD(INT_STS, WR_FIFO_LVL, 17, 1)
+ FIELD(INT_STS, RD_FIFO_LVL, 16, 1)
+ FIELD(INT_STS, DMA_CMD_ERR, 15, 1)
+ FIELD(INT_STS, DMA_Q_OV, 14, 1)
+ FIELD(INT_STS, DMA_DONE, 13, 1)
+ FIELD(INT_STS, DMA_P_DONE, 12, 1)
+ FIELD(INT_STS, P2D_LEN_ERR, 11, 1)
+ FIELD(INT_STS, PCFG_DONE, 2, 1)
+#define R_INT_STS_RSVD ((0x7 << 24) | (0x1 << 19) | (0xF << 7))
+
+REG32(INT_MASK, 0x10)
+
+REG32(STATUS, 0x14)
+ FIELD(STATUS, DMA_CMD_Q_F, 31, 1)
+ FIELD(STATUS, DMA_CMD_Q_E, 30, 1)
+ FIELD(STATUS, DMA_DONE_CNT, 28, 2)
+ FIELD(STATUS, RX_FIFO_LVL, 20, 5)
+ FIELD(STATUS, TX_FIFO_LVL, 12, 7)
+ FIELD(STATUS, PSS_GTS_USR_B, 11, 1)
+ FIELD(STATUS, PSS_FST_CFG_B, 10, 1)
+ FIELD(STATUS, PSS_CFG_RESET_B, 5, 1)
+
+REG32(DMA_SRC_ADDR, 0x18)
+REG32(DMA_DST_ADDR, 0x1C)
+REG32(DMA_SRC_LEN, 0x20)
+REG32(DMA_DST_LEN, 0x24)
+REG32(ROM_SHADOW, 0x28)
+REG32(SW_ID, 0x30)
+REG32(UNLOCK, 0x34)
+
+#define R_UNLOCK_MAGIC 0x757BDF0D
+
+REG32(MCTRL, 0x80)
+ FIELD(MCTRL, PS_VERSION, 28, 4)
+ FIELD(MCTRL, PCFG_POR_B, 8, 1)
+ FIELD(MCTRL, INT_PCAP_LPBK, 4, 1)
+ FIELD(MCTRL, QEMU, 3, 1)
+
+static void xlnx_zynq_devcfg_update_ixr(XlnxZynqDevcfg *s)
+{
+ qemu_set_irq(s->irq, ~s->regs[R_INT_MASK] & s->regs[R_INT_STS]);
+}
+
+static void xlnx_zynq_devcfg_reset(DeviceState *dev)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(dev);
+ int i;
+
+ for (i = 0; i < XLNX_ZYNQ_DEVCFG_R_MAX; ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+}
+
+static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s)
+{
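+ /*
+ * Drain the command FIFO: each pass moves up to BTT_MAX bytes of
+ * the head command, popping it once both lengths reach zero.
+ */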
+ do {
+ uint8_t buf[BTT_MAX];
+ XlnxZynqDevcfgDMACmd *dmah = s->dma_cmd_fifo;
+ uint32_t btt = BTT_MAX;
+ bool loopback = s->regs[R_MCTRL] & R_MCTRL_INT_PCAP_LPBK_MASK;
+
+ btt = MIN(btt, dmah->src_len);
+ if (loopback) {
+ btt = MIN(btt, dmah->dest_len);
+ }
+ DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr);
+ dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt);
+ dmah->src_len -= btt;
+ dmah->src_addr += btt;
+ if (loopback && (dmah->src_len || dmah->dest_len)) {
+ DB_PRINT("writing %x bytes from %x\n", btt, dmah->dest_addr);
+ dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt);
+ dmah->dest_len -= btt;
+ dmah->dest_addr += btt;
+ }
+ if (!dmah->src_len && !dmah->dest_len) {
+ DB_PRINT("dma operation finished\n");
+ s->regs[R_INT_STS] |= R_INT_STS_DMA_DONE_MASK |
+ R_INT_STS_DMA_P_DONE_MASK;
+ s->dma_cmd_fifo_num--;
+ memmove(s->dma_cmd_fifo, &s->dma_cmd_fifo[1],
+ sizeof(s->dma_cmd_fifo) - sizeof(s->dma_cmd_fifo[0]));
+ }
+ xlnx_zynq_devcfg_update_ixr(s);
+ } while (s->dma_cmd_fifo_num);
+}
+
+static void r_ixr_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+
+ xlnx_zynq_devcfg_update_ixr(s);
+}
+
+static uint64_t r_ctrl_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lock_ctrl_map); ++i) {
+ if (s->regs[R_LOCK] & 1 << i) {
+ val &= ~lock_ctrl_map[i];
+ val |= lock_ctrl_map[i] & s->regs[R_CTRL];
+ }
+ }
+ return val;
+}
+
+static void r_ctrl_post_write(RegisterInfo *reg, uint64_t val)
+{
+ const char *device_prefix = object_get_typename(OBJECT(reg->opaque));
+ uint32_t aes_en = FIELD_EX32(val, CTRL, PCFG_AES_EN);
+
+ if (aes_en != 0 && aes_en != 7) {
+ qemu_log_mask(LOG_UNIMP, "%s: warning, aes-en bits inconsistent, "
+ "unimplemented security reset should happen!\n",
+ device_prefix);
+ }
+}
+
+static void r_unlock_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+ const char *device_prefix = object_get_typename(OBJECT(s));
+
+ if (val == R_UNLOCK_MAGIC) {
+ DB_PRINT("successful unlock\n");
+ s->regs[R_CTRL] |= R_CTRL_PCAP_PR_MASK;
+ s->regs[R_CTRL] |= R_CTRL_PCFG_AES_EN_MASK;
+ memory_region_set_enabled(&s->iomem, true);
+ } else { /* bad unlock attempt */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed unlock\n", device_prefix);
+ s->regs[R_CTRL] &= ~R_CTRL_PCAP_PR_MASK;
+ s->regs[R_CTRL] &= ~R_CTRL_PCFG_AES_EN_MASK;
+ /* core becomes inaccessible */
+ memory_region_set_enabled(&s->iomem, false);
+ }
+}
+
+static uint64_t r_lock_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+
+ /* once bits are locked they stay locked */
+ return s->regs[R_LOCK] | val;
+}
+
+static void r_dma_dst_len_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+
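+ /* Writing DMA_DST_LEN queues the command and kicks off the transfer. */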
+ s->dma_cmd_fifo[s->dma_cmd_fifo_num] = (XlnxZynqDevcfgDMACmd) {
+ .src_addr = s->regs[R_DMA_SRC_ADDR] & ~0x3UL,
+ .dest_addr = s->regs[R_DMA_DST_ADDR] & ~0x3UL,
+ .src_len = s->regs[R_DMA_SRC_LEN] << 2,
+ .dest_len = s->regs[R_DMA_DST_LEN] << 2,
+ };
+ s->dma_cmd_fifo_num++;
+ DB_PRINT("dma transfer started; %d total transfers pending\n",
+ s->dma_cmd_fifo_num);
+ xlnx_zynq_devcfg_dma_go(s);
+}
+
+static const RegisterAccessInfo xlnx_zynq_devcfg_regs_info[] = {
+ { .name = "CTRL", .addr = A_CTRL,
+ .reset = R_CTRL_PCAP_PR_MASK | R_CTRL_PCAP_MODE_MASK | 0x3 << 13,
+ .rsvd = 0x1 << 28 | 0x3ff << 13 | 0x3 << 13,
+ .pre_write = r_ctrl_pre_write,
+ .post_write = r_ctrl_post_write,
+ },
+ { .name = "LOCK", .addr = A_LOCK,
+ .rsvd = MAKE_64BIT_MASK(5, 64 - 5),
+ .pre_write = r_lock_pre_write,
+ },
+ { .name = "CFG", .addr = A_CFG,
+ .reset = R_CFG_RESET,
+ .rsvd = 0xfffff00f,
+ },
+ { .name = "INT_STS", .addr = A_INT_STS,
+ .w1c = ~R_INT_STS_RSVD,
+ .reset = R_INT_STS_PSS_GTS_USR_B_MASK |
+ R_INT_STS_PSS_CFG_RESET_B_MASK |
+ R_INT_STS_WR_FIFO_LVL_MASK,
+ .rsvd = R_INT_STS_RSVD,
+ .post_write = r_ixr_post_write,
+ },
+ { .name = "INT_MASK", .addr = A_INT_MASK,
+ .reset = ~0,
+ .rsvd = R_INT_STS_RSVD,
+ .post_write = r_ixr_post_write,
+ },
+ { .name = "STATUS", .addr = A_STATUS,
+ .reset = R_STATUS_DMA_CMD_Q_E_MASK |
+ R_STATUS_PSS_GTS_USR_B_MASK |
+ R_STATUS_PSS_CFG_RESET_B_MASK,
+ .ro = ~0,
+ },
+ { .name = "DMA_SRC_ADDR", .addr = A_DMA_SRC_ADDR, },
+ { .name = "DMA_DST_ADDR", .addr = A_DMA_DST_ADDR, },
+ { .name = "DMA_SRC_LEN", .addr = A_DMA_SRC_LEN,
+ .ro = MAKE_64BIT_MASK(27, 64 - 27) },
+ { .name = "DMA_DST_LEN", .addr = A_DMA_DST_LEN,
+ .ro = MAKE_64BIT_MASK(27, 64 - 27),
+ .post_write = r_dma_dst_len_post_write,
+ },
+ { .name = "ROM_SHADOW", .addr = A_ROM_SHADOW,
+ .rsvd = ~0ull,
+ },
+ { .name = "SW_ID", .addr = A_SW_ID, },
+ { .name = "UNLOCK", .addr = A_UNLOCK,
+ .post_write = r_unlock_post_write,
+ },
+ { .name = "MCTRL", .addr = R_MCTRL * 4,
+ /* Silicon 3.0 for version field, the mysterious reserved bit 23
+ * and QEMU platform identifier.
+ */
+ .reset = 0x2 << R_MCTRL_PS_VERSION_SHIFT | 1 << 23 | R_MCTRL_QEMU_MASK,
+ .ro = ~R_MCTRL_INT_PCAP_LPBK_MASK,
+ .rsvd = 0x00f00303,
+ },
+};
+
+static const MemoryRegionOps xlnx_zynq_devcfg_reg_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = {
+ .name = "xlnx_zynq_devcfg_dma_cmd",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT32(dest_len, XlnxZynqDevcfgDMACmd),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_xlnx_zynq_devcfg = {
+ .name = "xlnx_zynq_devcfg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg,
+ XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0,
+ vmstate_xlnx_zynq_devcfg_dma_cmd,
+ XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT8(dma_cmd_fifo_num, XlnxZynqDevcfg),
+ VMSTATE_UINT32_ARRAY(regs, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_R_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xlnx_zynq_devcfg_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(obj);
+ RegisterInfoArray *reg_array;
+
+ sysbus_init_irq(sbd, &s->irq);
+
+ memory_region_init(&s->iomem, obj, "devcfg", XLNX_ZYNQ_DEVCFG_R_MAX * 4);
+ reg_array =
+ register_init_block32(DEVICE(obj), xlnx_zynq_devcfg_regs_info,
+ ARRAY_SIZE(xlnx_zynq_devcfg_regs_info),
+ s->regs_info, s->regs,
+ &xlnx_zynq_devcfg_reg_ops,
+ XLNX_ZYNQ_DEVCFG_ERR_DEBUG,
+ XLNX_ZYNQ_DEVCFG_R_MAX);
+ memory_region_add_subregion(&s->iomem,
+ A_CTRL,
+ &reg_array->mem);
+
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = xlnx_zynq_devcfg_reset;
+ dc->vmsd = &vmstate_xlnx_zynq_devcfg;
+}
+
+static const TypeInfo xlnx_zynq_devcfg_info = {
+ .name = TYPE_XLNX_ZYNQ_DEVCFG,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxZynqDevcfg),
+ .instance_init = xlnx_zynq_devcfg_init,
+ .class_init = xlnx_zynq_devcfg_class_init,
+};
+
+static void xlnx_zynq_devcfg_register_types(void)
+{
+ type_register_static(&xlnx_zynq_devcfg_info);
+}
+
+type_init(xlnx_zynq_devcfg_register_types)
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
new file mode 100644
index 000000000..896bb3574
--- /dev/null
+++ b/hw/dma/xlnx_csu_dma.c
@@ -0,0 +1,743 @@
+/*
+ * Xilinx Platform CSU Stream DMA emulation
+ *
+ * This implementation is based on
+ * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "hw/ptimer.h"
+#include "hw/stream.h"
+#include "hw/register.h"
+#include "hw/dma/xlnx_csu_dma.h"
+
+/*
+ * Ref: UG1087 (v1.7) February 8, 2019
+ * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html
+ * CSUDMA Module section
+ */
+REG32(ADDR, 0x0)
+ FIELD(ADDR, ADDR, 2, 30) /* wo */
+REG32(SIZE, 0x4)
+ FIELD(SIZE, SIZE, 2, 27) /* wo */
+ FIELD(SIZE, LAST_WORD, 0, 1) /* rw, only exists in SRC */
+REG32(STATUS, 0x8)
+ FIELD(STATUS, DONE_CNT, 13, 3) /* wtc */
+ FIELD(STATUS, FIFO_LEVEL, 5, 8) /* ro */
+ FIELD(STATUS, OUTSTANDING, 1, 4) /* ro */
+ FIELD(STATUS, BUSY, 0, 1) /* ro */
+REG32(CTRL, 0xc)
+ FIELD(CTRL, FIFOTHRESH, 25, 7) /* rw, only exists in DST, reset 0x40 */
+ FIELD(CTRL, APB_ERR_RESP, 24, 1) /* rw */
+ FIELD(CTRL, ENDIANNESS, 23, 1) /* rw */
+ FIELD(CTRL, AXI_BRST_TYPE, 22, 1) /* rw */
+ FIELD(CTRL, TIMEOUT_VAL, 10, 12) /* rw, reset: 0xFFE */
+ FIELD(CTRL, FIFO_THRESH, 2, 8) /* rw, reset: 0x80 */
+ FIELD(CTRL, PAUSE_STRM, 1, 1) /* rw */
+ FIELD(CTRL, PAUSE_MEM, 0, 1) /* rw */
+REG32(CRC, 0x10)
+REG32(INT_STATUS, 0x14)
+ FIELD(INT_STATUS, FIFO_OVERFLOW, 7, 1) /* wtc */
+ FIELD(INT_STATUS, INVALID_APB, 6, 1) /* wtc */
+ FIELD(INT_STATUS, THRESH_HIT, 5, 1) /* wtc */
+ FIELD(INT_STATUS, TIMEOUT_MEM, 4, 1) /* wtc */
+ FIELD(INT_STATUS, TIMEOUT_STRM, 3, 1) /* wtc */
+ FIELD(INT_STATUS, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
+ FIELD(INT_STATUS, DONE, 1, 1) /* wtc */
+ FIELD(INT_STATUS, MEM_DONE, 0, 1) /* wtc */
+REG32(INT_ENABLE, 0x18)
+ FIELD(INT_ENABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
+ FIELD(INT_ENABLE, INVALID_APB, 6, 1) /* wtc */
+ FIELD(INT_ENABLE, THRESH_HIT, 5, 1) /* wtc */
+ FIELD(INT_ENABLE, TIMEOUT_MEM, 4, 1) /* wtc */
+ FIELD(INT_ENABLE, TIMEOUT_STRM, 3, 1) /* wtc */
+ FIELD(INT_ENABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
+ FIELD(INT_ENABLE, DONE, 1, 1) /* wtc */
+ FIELD(INT_ENABLE, MEM_DONE, 0, 1) /* wtc */
+REG32(INT_DISABLE, 0x1c)
+ FIELD(INT_DISABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
+ FIELD(INT_DISABLE, INVALID_APB, 6, 1) /* wtc */
+ FIELD(INT_DISABLE, THRESH_HIT, 5, 1) /* wtc */
+ FIELD(INT_DISABLE, TIMEOUT_MEM, 4, 1) /* wtc */
+ FIELD(INT_DISABLE, TIMEOUT_STRM, 3, 1) /* wtc */
+ FIELD(INT_DISABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
+ FIELD(INT_DISABLE, DONE, 1, 1) /* wtc */
+ FIELD(INT_DISABLE, MEM_DONE, 0, 1) /* wtc */
+REG32(INT_MASK, 0x20)
+ FIELD(INT_MASK, FIFO_OVERFLOW, 7, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, INVALID_APB, 6, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, THRESH_HIT, 5, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, TIMEOUT_MEM, 4, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, TIMEOUT_STRM, 3, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, AXI_BRESP_ERR, 2, 1) /* ro, reset: 0x1, SRC: AXI_RDERR */
+ FIELD(INT_MASK, DONE, 1, 1) /* ro, reset: 0x1 */
+ FIELD(INT_MASK, MEM_DONE, 0, 1) /* ro, reset: 0x1 */
+REG32(CTRL2, 0x24)
+ FIELD(CTRL2, ARCACHE, 24, 3) /* rw */
+ FIELD(CTRL2, ROUTE_BIT, 23, 1) /* rw */
+ FIELD(CTRL2, TIMEOUT_EN, 22, 1) /* rw */
+ FIELD(CTRL2, TIMEOUT_PRE, 4, 12) /* rw, reset: 0xFFF */
+ FIELD(CTRL2, MAX_OUTS_CMDS, 0, 4) /* rw, reset: 0x8 */
+REG32(ADDR_MSB, 0x28)
+ FIELD(ADDR_MSB, ADDR_MSB, 0, 17) /* wo */
+
+#define R_CTRL_TIMEOUT_VAL_RESET (0xFFE)
+#define R_CTRL_FIFO_THRESH_RESET (0x80)
+#define R_CTRL_FIFOTHRESH_RESET (0x40)
+
+#define R_CTRL2_TIMEOUT_PRE_RESET (0xFFF)
+#define R_CTRL2_MAX_OUTS_CMDS_RESET (0x8)
+
+#define XLNX_CSU_DMA_ERR_DEBUG (0)
+#define XLNX_CSU_DMA_INT_R_MASK (0xff)
+
+/* UG1087: Set the prescaler value for the timeout in clk (~2.5ns) cycles */
+#define XLNX_CSU_DMA_TIMER_FREQ (400 * 1000 * 1000)
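+
+/*
+ * Worked example with the reset values (for illustration only): CTRL2
+ * TIMEOUT_PRE = 0xFFF gives a divider of 0x1000, so the timer ticks at
+ * 400 MHz / 4096 ~= 97.66 kHz (~10.24 us per tick); CTRL TIMEOUT_VAL =
+ * 0xFFE then expires after 4094 ticks, i.e. roughly 42 ms of stream
+ * inactivity (see xlnx_csu_dma_src_notify() below).
+ */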
+
+static bool xlnx_csu_dma_is_paused(XlnxCSUDMA *s)
+{
+ bool paused;
+
+ paused = !!(s->regs[R_CTRL] & R_CTRL_PAUSE_STRM_MASK);
+ paused |= !!(s->regs[R_CTRL] & R_CTRL_PAUSE_MEM_MASK);
+
+ return paused;
+}
+
+static bool xlnx_csu_dma_get_eop(XlnxCSUDMA *s)
+{
+ return s->r_size_last_word;
+}
+
+static bool xlnx_csu_dma_burst_is_fixed(XlnxCSUDMA *s)
+{
+ return !!(s->regs[R_CTRL] & R_CTRL_AXI_BRST_TYPE_MASK);
+}
+
+static bool xlnx_csu_dma_timeout_enabled(XlnxCSUDMA *s)
+{
+ return !!(s->regs[R_CTRL2] & R_CTRL2_TIMEOUT_EN_MASK);
+}
+
+static void xlnx_csu_dma_update_done_cnt(XlnxCSUDMA *s, int a)
+{
+ int cnt;
+
+ /* Increase DONE_CNT */
+ cnt = ARRAY_FIELD_EX32(s->regs, STATUS, DONE_CNT) + a;
+ ARRAY_FIELD_DP32(s->regs, STATUS, DONE_CNT, cnt);
+}
+
+static void xlnx_csu_dma_data_process(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+ uint32_t bswap;
+ uint32_t i;
+
+ bswap = s->regs[R_CTRL] & R_CTRL_ENDIANNESS_MASK;
+ if (s->is_dst && !bswap) {
+ /* Fast when ENDIANNESS cleared */
+ return;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ uint8_t *b = &buf[i];
+ union {
+ uint8_t u8[4];
+ uint32_t u32;
+ } v = {
+ .u8 = { b[0], b[1], b[2], b[3] }
+ };
+
+ if (!s->is_dst) {
+ s->regs[R_CRC] += v.u32;
+ }
+ if (bswap) {
+ /*
+ * No point in using bswap; we need to write back
+ * into a potentially unaligned pointer.
+ */
+ b[0] = v.u8[3];
+ b[1] = v.u8[2];
+ b[2] = v.u8[1];
+ b[3] = v.u8[0];
+ }
+ }
+}
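+
+/*
+ * Example of the transform above (buffer contents assumed for
+ * illustration): on a SRC channel with buf = { 0x01, 0x02, 0x03, 0x04 },
+ * the word built from b[0..3] in host byte order (0x04030201 on a
+ * little-endian host) is accumulated into R_CRC; if CTRL.ENDIANNESS is
+ * set, the buffer is also rewritten in place as { 0x04, 0x03, 0x02, 0x01 }.
+ */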
+
+static void xlnx_csu_dma_update_irq(XlnxCSUDMA *s)
+{
+ qemu_set_irq(s->irq, !!(s->regs[R_INT_STATUS] & ~s->regs[R_INT_MASK]));
+}
+
+/* len is in bytes */
+static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+ hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+ MemTxResult result = MEMTX_OK;
+
+ if (xlnx_csu_dma_burst_is_fixed(s)) {
+ uint32_t i;
+
+ for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
+ uint32_t mlen = MIN(len - i, s->width);
+
+ result = address_space_rw(&s->dma_as, addr, s->attr,
+ buf + i, mlen, false);
+ }
+ } else {
+ result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, false);
+ }
+
+ if (result == MEMTX_OK) {
+ xlnx_csu_dma_data_process(s, buf, len);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+ " for mem read", __func__, addr);
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
+ xlnx_csu_dma_update_irq(s);
+ }
+ return len;
+}
+
+/* len is in bytes */
+static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+ hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+ MemTxResult result = MEMTX_OK;
+
+ xlnx_csu_dma_data_process(s, buf, len);
+ if (xlnx_csu_dma_burst_is_fixed(s)) {
+ uint32_t i;
+
+ for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
+ uint32_t mlen = MIN(len - i, s->width);
+
+ result = address_space_rw(&s->dma_as, addr, s->attr,
+ buf, mlen, true);
+ buf += mlen;
+ }
+ } else {
+ result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, true);
+ }
+
+ if (result != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+ " for mem write", __func__, addr);
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
+ xlnx_csu_dma_update_irq(s);
+ }
+ return len;
+}
+
+static void xlnx_csu_dma_done(XlnxCSUDMA *s)
+{
+ s->regs[R_STATUS] &= ~R_STATUS_BUSY_MASK;
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_DONE_MASK;
+
+ if (!s->is_dst) {
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_MEM_DONE_MASK;
+ }
+
+ xlnx_csu_dma_update_done_cnt(s, 1);
+}
+
+static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
+{
+ uint32_t size = s->regs[R_SIZE];
+ hwaddr dst = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+
+ assert(len <= size);
+
+ size -= len;
+ s->regs[R_SIZE] = size;
+
+ if (!xlnx_csu_dma_burst_is_fixed(s)) {
+ dst += len;
+ s->regs[R_ADDR] = (uint32_t) dst;
+ s->regs[R_ADDR_MSB] = dst >> 32;
+ }
+
+ if (size == 0) {
+ xlnx_csu_dma_done(s);
+ }
+
+ return size;
+}
+
+static void xlnx_csu_dma_src_notify(void *opaque)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
+ unsigned char buf[4 * 1024];
+ size_t rlen = 0;
+
+ ptimer_transaction_begin(s->src_timer);
+ /* Stop the backpressure timer */
+ ptimer_stop(s->src_timer);
+
+ while (s->regs[R_SIZE] && !xlnx_csu_dma_is_paused(s) &&
+ stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
+ uint32_t plen = MIN(s->regs[R_SIZE], sizeof buf);
+ bool eop = false;
+
+ /* Did we fit it all? */
+ if (s->regs[R_SIZE] == plen && xlnx_csu_dma_get_eop(s)) {
+ eop = true;
+ }
+
+ /* DMA transfer */
+ xlnx_csu_dma_read(s, buf, plen);
+ rlen = stream_push(s->tx_dev, buf, plen, eop);
+ xlnx_csu_dma_advance(s, rlen);
+ }
+
+ if (xlnx_csu_dma_timeout_enabled(s) && s->regs[R_SIZE] &&
+ !stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
+ uint32_t timeout = ARRAY_FIELD_EX32(s->regs, CTRL, TIMEOUT_VAL);
+ uint32_t div = ARRAY_FIELD_EX32(s->regs, CTRL2, TIMEOUT_PRE) + 1;
+ uint32_t freq = XLNX_CSU_DMA_TIMER_FREQ;
+
+ freq /= div;
+ ptimer_set_freq(s->src_timer, freq);
+ ptimer_set_count(s->src_timer, timeout);
+ ptimer_run(s->src_timer, 1);
+ }
+
+ ptimer_transaction_commit(s->src_timer);
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t addr_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ /* Address is word aligned */
+ return val & R_ADDR_ADDR_MASK;
+}
+
+static uint64_t size_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ if (s->regs[R_SIZE] != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Starting DMA while already running.\n", __func__);
+ }
+
+ if (!s->is_dst) {
+ s->r_size_last_word = !!(val & R_SIZE_LAST_WORD_MASK);
+ }
+
+ /* Size is word aligned */
+ return val & R_SIZE_SIZE_MASK;
+}
+
+static uint64_t size_post_read(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ return val | s->r_size_last_word;
+}
+
+static void size_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ s->regs[R_STATUS] |= R_STATUS_BUSY_MASK;
+
+ /*
+ * Note that if SIZE is programmed to 0, and the DMA is started,
+ * the interrupts DONE and MEM_DONE will be asserted.
+ */
+ if (s->regs[R_SIZE] == 0) {
+ xlnx_csu_dma_done(s);
+ xlnx_csu_dma_update_irq(s);
+ return;
+ }
+
+ /* Writing SIZE is considered the last step in the transfer configuration */
+ if (!s->is_dst) {
+ xlnx_csu_dma_src_notify(s);
+ } else {
+ if (s->notify) {
+ s->notify(s->notify_opaque);
+ }
+ }
+}
+
+static uint64_t status_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ return val & (R_STATUS_DONE_CNT_MASK | R_STATUS_BUSY_MASK);
+}
+
+static void ctrl_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ if (!s->is_dst) {
+ if (!xlnx_csu_dma_is_paused(s)) {
+ xlnx_csu_dma_src_notify(s);
+ }
+ } else {
+ if (!xlnx_csu_dma_is_paused(s) && s->notify) {
+ s->notify(s->notify_opaque);
+ }
+ }
+}
+
+static uint64_t int_status_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ /* DMA counter decrements when flag 'DONE' is cleared */
+ if ((val & s->regs[R_INT_STATUS] & R_INT_STATUS_DONE_MASK)) {
+ xlnx_csu_dma_update_done_cnt(s, -1);
+ }
+
+ return s->regs[R_INT_STATUS] & ~val;
+}
+
+static void int_status_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t int_enable_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+ uint32_t v32 = val;
+
+ /*
+ * R_INT_ENABLE doesn't have its own state.
+ * It is used to indirectly modify R_INT_MASK.
+ *
+ * 1: Enable this interrupt field (the mask bit will be cleared to 0)
+ * 0: No effect
+ */
+ s->regs[R_INT_MASK] &= ~v32;
+ return 0;
+}
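+
+/*
+ * Example: INT_MASK resets to 0xff (all sources masked).  A guest write
+ * of 0x02 to INT_ENABLE clears mask bit 1, leaving INT_MASK = 0xfd, so a
+ * later DONE event (INT_STATUS bit 1) raises the IRQ in
+ * xlnx_csu_dma_update_irq(); a write of 0x02 to INT_DISABLE sets the
+ * mask bit again.
+ */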
+
+static void int_enable_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t int_disable_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+ uint32_t v32 = val;
+
+ /*
+ * R_INT_DISABLE doesn't have its own state.
+ * It is used to indirectly modify R_INT_MASK.
+ *
+ * 1: Disable this interrupt field (the mask bit will be set to 1)
+ * 0: No effect
+ */
+ s->regs[R_INT_MASK] |= v32;
+ return 0;
+}
+
+static void int_disable_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
+
+ xlnx_csu_dma_update_irq(s);
+}
+
+static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ return val & R_ADDR_MSB_ADDR_MSB_MASK;
+}
+
+static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = {
+#define DMACH_REGINFO(NAME, snd) \
+ (const RegisterAccessInfo []) { \
+ { \
+ .name = #NAME "_ADDR", \
+ .addr = A_ADDR, \
+ .pre_write = addr_pre_write \
+ }, { \
+ .name = #NAME "_SIZE", \
+ .addr = A_SIZE, \
+ .pre_write = size_pre_write, \
+ .post_write = size_post_write, \
+ .post_read = size_post_read \
+ }, { \
+ .name = #NAME "_STATUS", \
+ .addr = A_STATUS, \
+ .pre_write = status_pre_write, \
+ .w1c = R_STATUS_DONE_CNT_MASK, \
+ .ro = (R_STATUS_BUSY_MASK \
+ | R_STATUS_FIFO_LEVEL_MASK \
+ | R_STATUS_OUTSTANDING_MASK) \
+ }, { \
+ .name = #NAME "_CTRL", \
+ .addr = A_CTRL, \
+ .post_write = ctrl_post_write, \
+ .reset = ((R_CTRL_TIMEOUT_VAL_RESET << R_CTRL_TIMEOUT_VAL_SHIFT) \
+ | (R_CTRL_FIFO_THRESH_RESET << R_CTRL_FIFO_THRESH_SHIFT)\
+ | (snd ? 0 : R_CTRL_FIFOTHRESH_RESET \
+ << R_CTRL_FIFOTHRESH_SHIFT)) \
+ }, { \
+ .name = #NAME "_CRC", \
+ .addr = A_CRC, \
+ }, { \
+ .name = #NAME "_INT_STATUS", \
+ .addr = A_INT_STATUS, \
+ .pre_write = int_status_pre_write, \
+ .post_write = int_status_post_write \
+ }, { \
+ .name = #NAME "_INT_ENABLE", \
+ .addr = A_INT_ENABLE, \
+ .pre_write = int_enable_pre_write, \
+ .post_write = int_enable_post_write \
+ }, { \
+ .name = #NAME "_INT_DISABLE", \
+ .addr = A_INT_DISABLE, \
+ .pre_write = int_disable_pre_write, \
+ .post_write = int_disable_post_write \
+ }, { \
+ .name = #NAME "_INT_MASK", \
+ .addr = A_INT_MASK, \
+ .ro = ~0, \
+ .reset = XLNX_CSU_DMA_INT_R_MASK \
+ }, { \
+ .name = #NAME "_CTRL2", \
+ .addr = A_CTRL2, \
+ .reset = ((R_CTRL2_TIMEOUT_PRE_RESET \
+ << R_CTRL2_TIMEOUT_PRE_SHIFT) \
+ | (R_CTRL2_MAX_OUTS_CMDS_RESET \
+ << R_CTRL2_MAX_OUTS_CMDS_SHIFT)) \
+ }, { \
+ .name = #NAME "_ADDR_MSB", \
+ .addr = A_ADDR_MSB, \
+ .pre_write = addr_msb_pre_write \
+ } \
+ }
+
+ DMACH_REGINFO(DMA_SRC, true),
+ DMACH_REGINFO(DMA_DST, false)
+};
+
+static const MemoryRegionOps xlnx_csu_dma_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static void xlnx_csu_dma_src_timeout_hit(void *opaque)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
+
+ /* Ignore if the timeout is disabled */
+ if (!xlnx_csu_dma_timeout_enabled(s)) {
+ return;
+ }
+
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_TIMEOUT_STRM_MASK;
+ xlnx_csu_dma_update_irq(s);
+}
+
+static size_t xlnx_csu_dma_stream_push(StreamSink *obj, uint8_t *buf,
+ size_t len, bool eop)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
+ uint32_t size = s->regs[R_SIZE];
+ uint32_t mlen = MIN(size, len) & (~3); /* Size is word aligned */
+
+ /* Only called when the device is the DST channel */
+ assert(s->is_dst);
+
+ if (size == 0 || len == 0) {
+ return 0;
+ }
+
+ if (len && (xlnx_csu_dma_is_paused(s) || mlen == 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "csu-dma: DST channel dropping %zd b of data.\n", len);
+ s->regs[R_INT_STATUS] |= R_INT_STATUS_FIFO_OVERFLOW_MASK;
+ return len;
+ }
+
+ if (xlnx_csu_dma_write(s, buf, mlen) != mlen) {
+ return 0;
+ }
+
+ xlnx_csu_dma_advance(s, mlen);
+ xlnx_csu_dma_update_irq(s);
+
+ return mlen;
+}
+
+static bool xlnx_csu_dma_stream_can_push(StreamSink *obj,
+ StreamCanPushNotifyFn notify,
+ void *notify_opaque)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
+
+ if (s->regs[R_SIZE] != 0) {
+ return true;
+ } else {
+ s->notify = notify;
+ s->notify_opaque = notify_opaque;
+ return false;
+ }
+}
+
+static void xlnx_csu_dma_reset(DeviceState *dev)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+}
+
+static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
+ RegisterInfoArray *reg_array;
+
+ if (!s->is_dst && !s->tx_dev) {
+ error_setg(errp, "zynqmp.csu-dma: Stream not connected");
+ return;
+ }
+
+ if (!s->dma_mr) {
+ error_setg(errp, TYPE_XLNX_CSU_DMA " 'dma' link not set");
+ return;
+ }
+ address_space_init(&s->dma_as, s->dma_mr, "csu-dma");
+
+ reg_array =
+ register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst],
+ XLNX_CSU_DMA_R_MAX,
+ s->regs_info, s->regs,
+ &xlnx_csu_dma_ops,
+ XLNX_CSU_DMA_ERR_DEBUG,
+ XLNX_CSU_DMA_R_MAX * 4);
+ memory_region_add_subregion(&s->iomem,
+ 0x0,
+ &reg_array->mem);
+
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
+
+ s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
+ s, PTIMER_POLICY_DEFAULT);
+
+ s->attr = MEMTXATTRS_UNSPECIFIED;
+
+ s->r_size_last_word = 0;
+}
+
+static const VMStateDescription vmstate_xlnx_csu_dma = {
+ .name = TYPE_XLNX_CSU_DMA,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
+ VMSTATE_UINT16(width, XlnxCSUDMA),
+ VMSTATE_BOOL(is_dst, XlnxCSUDMA),
+ VMSTATE_BOOL(r_size_last_word, XlnxCSUDMA),
+ VMSTATE_UINT32_ARRAY(regs, XlnxCSUDMA, XLNX_CSU_DMA_R_MAX),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static Property xlnx_csu_dma_properties[] = {
+ /*
+ * Ref PG021, Stream Data Width:
+ * Data width in bits of the AXI S2MM AXI4-Stream Data bus.
+ * This value must be less than or equal to the Memory Map Data Width.
+ * Valid values are 8, 16, 32, 64, 128, 512 and 1024.
+ * "dma-width" is the byte value of the "Stream Data Width".
+ */
+ DEFINE_PROP_UINT16("dma-width", XlnxCSUDMA, width, 4),
+ /*
+ * The CSU DMA is a two-channel, simple DMA, allowing separate control of
+ * the SRC (read) channel and DST (write) channel. "is-dst" is used to mark
+ * which channel the device is connected to.
+ */
+ DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true),
+ DEFINE_PROP_END_OF_LIST(),
+};
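+
+/*
+ * A minimal wiring sketch for board code; the parent object, memory
+ * region, stream sink and MMIO address below are illustrative
+ * assumptions, not taken from an in-tree board:
+ *
+ *     object_initialize_child(OBJECT(machine), "csu-dma", &s->csu_dma,
+ *                             TYPE_XLNX_CSU_DMA);
+ *     qdev_prop_set_bit(DEVICE(&s->csu_dma), "is-dst", false);
+ *     object_property_set_link(OBJECT(&s->csu_dma), "dma",
+ *                              OBJECT(machine_ram_mr), &error_fatal);
+ *     object_property_set_link(OBJECT(&s->csu_dma), "stream-connected-dma",
+ *                              OBJECT(some_stream_sink), &error_fatal);
+ *     sysbus_realize(SYS_BUS_DEVICE(&s->csu_dma), &error_fatal);
+ *     sysbus_mmio_map(SYS_BUS_DEVICE(&s->csu_dma), 0, 0xFFC80000);
+ */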
+
+static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
+
+ dc->reset = xlnx_csu_dma_reset;
+ dc->realize = xlnx_csu_dma_realize;
+ dc->vmsd = &vmstate_xlnx_csu_dma;
+ device_class_set_props(dc, xlnx_csu_dma_properties);
+
+ ssc->push = xlnx_csu_dma_stream_push;
+ ssc->can_push = xlnx_csu_dma_stream_can_push;
+}
+
+static void xlnx_csu_dma_init(Object *obj)
+{
+ XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
+
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA,
+ XLNX_CSU_DMA_R_MAX * 4);
+
+ object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SINK,
+ (Object **)&s->tx_dev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+ object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
+ (Object **)&s->dma_mr,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+}
+
+static const TypeInfo xlnx_csu_dma_info = {
+ .name = TYPE_XLNX_CSU_DMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxCSUDMA),
+ .class_init = xlnx_csu_dma_class_init,
+ .instance_init = xlnx_csu_dma_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_STREAM_SINK },
+ { }
+ }
+};
+
+static void xlnx_csu_dma_register_types(void)
+{
+ type_register_static(&xlnx_csu_dma_info);
+}
+
+type_init(xlnx_csu_dma_register_types)
diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c
new file mode 100644
index 000000000..967548abd
--- /dev/null
+++ b/hw/dma/xlnx_dpdma.c
@@ -0,0 +1,790 @@
+/*
+ * xlnx_dpdma.c
+ *
+ * Copyright (C) 2015 : GreenSocs Ltd
+ * http://www.greensocs.com/ , email: info@greensocs.com
+ *
+ * Developed by :
+ * Frederic Konrad <fred.konrad@greensocs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/dma/xlnx_dpdma.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+
+#ifndef DEBUG_DPDMA
+#define DEBUG_DPDMA 0
+#endif
+
+#define DPRINTF(fmt, ...) do { \
+ if (DEBUG_DPDMA) { \
+ qemu_log("xlnx_dpdma: " fmt , ## __VA_ARGS__); \
+ } \
+} while (0)
+
+/*
+ * Register offsets for DPDMA.
+ */
+#define DPDMA_ERR_CTRL (0x0000)
+#define DPDMA_ISR (0x0004 >> 2)
+#define DPDMA_IMR (0x0008 >> 2)
+#define DPDMA_IEN (0x000C >> 2)
+#define DPDMA_IDS (0x0010 >> 2)
+#define DPDMA_EISR (0x0014 >> 2)
+#define DPDMA_EIMR (0x0018 >> 2)
+#define DPDMA_EIEN (0x001C >> 2)
+#define DPDMA_EIDS (0x0020 >> 2)
+#define DPDMA_CNTL (0x0100 >> 2)
+
+#define DPDMA_GBL (0x0104 >> 2)
+#define DPDMA_GBL_TRG_CH(n) (1 << n)
+#define DPDMA_GBL_RTRG_CH(n) (1 << 6 << n)
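+/*
+ * Example: a guest write of 0x01 to DPDMA_GBL triggers channel 0
+ * (DPDMA_GBL_TRG_CH(0) == 1 << 0), while a write of 0x40 retriggers it
+ * (DPDMA_GBL_RTRG_CH(0) == 1 << 6); channel n uses bits n and 6 + n
+ * respectively.
+ */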
+
+#define DPDMA_ALC0_CNTL (0x0108 >> 2)
+#define DPDMA_ALC0_STATUS (0x010C >> 2)
+#define DPDMA_ALC0_MAX (0x0110 >> 2)
+#define DPDMA_ALC0_MIN (0x0114 >> 2)
+#define DPDMA_ALC0_ACC (0x0118 >> 2)
+#define DPDMA_ALC0_ACC_TRAN (0x011C >> 2)
+#define DPDMA_ALC1_CNTL (0x0120 >> 2)
+#define DPDMA_ALC1_STATUS (0x0124 >> 2)
+#define DPDMA_ALC1_MAX (0x0128 >> 2)
+#define DPDMA_ALC1_MIN (0x012C >> 2)
+#define DPDMA_ALC1_ACC (0x0130 >> 2)
+#define DPDMA_ALC1_ACC_TRAN (0x0134 >> 2)
+
+#define DPDMA_DSCR_STRT_ADDRE_CH(n) ((0x0200 + n * 0x100) >> 2)
+#define DPDMA_DSCR_STRT_ADDR_CH(n) ((0x0204 + n * 0x100) >> 2)
+#define DPDMA_DSCR_NEXT_ADDRE_CH(n) ((0x0208 + n * 0x100) >> 2)
+#define DPDMA_DSCR_NEXT_ADDR_CH(n) ((0x020C + n * 0x100) >> 2)
+#define DPDMA_PYLD_CUR_ADDRE_CH(n) ((0x0210 + n * 0x100) >> 2)
+#define DPDMA_PYLD_CUR_ADDR_CH(n) ((0x0214 + n * 0x100) >> 2)
+
+#define DPDMA_CNTL_CH(n) ((0x0218 + n * 0x100) >> 2)
+#define DPDMA_CNTL_CH_EN (1)
+#define DPDMA_CNTL_CH_PAUSED (1 << 1)
+
+#define DPDMA_STATUS_CH(n) ((0x021C + n * 0x100) >> 2)
+#define DPDMA_STATUS_BURST_TYPE (1 << 4)
+#define DPDMA_STATUS_MODE (1 << 5)
+#define DPDMA_STATUS_EN_CRC (1 << 6)
+#define DPDMA_STATUS_LAST_DSCR (1 << 7)
+#define DPDMA_STATUS_LDSCR_FRAME (1 << 8)
+#define DPDMA_STATUS_IGNR_DONE (1 << 9)
+#define DPDMA_STATUS_DSCR_DONE (1 << 10)
+#define DPDMA_STATUS_EN_DSCR_UP (1 << 11)
+#define DPDMA_STATUS_EN_DSCR_INTR (1 << 12)
+#define DPDMA_STATUS_PREAMBLE_OFF (13)
+
+#define DPDMA_VDO_CH(n) ((0x0220 + n * 0x100) >> 2)
+#define DPDMA_PYLD_SZ_CH(n) ((0x0224 + n * 0x100) >> 2)
+#define DPDMA_DSCR_ID_CH(n) ((0x0228 + n * 0x100) >> 2)
+
+/*
+ * Descriptor control field.
+ */
+#define CONTROL_PREAMBLE_VALUE 0xA5
+
+#define DSCR_CTRL_PREAMBLE 0xFF
+#define DSCR_CTRL_EN_DSCR_DONE_INTR (1 << 8)
+#define DSCR_CTRL_EN_DSCR_UPDATE (1 << 9)
+#define DSCR_CTRL_IGNORE_DONE (1 << 10)
+#define DSCR_CTRL_AXI_BURST_TYPE (1 << 11)
+#define DSCR_CTRL_AXCACHE (0x0F << 12)
+#define DSCR_CTRL_AXPROT (0x2 << 16)
+#define DSCR_CTRL_DESCRIPTOR_MODE (1 << 18)
+#define DSCR_CTRL_LAST_DESCRIPTOR (1 << 19)
+#define DSCR_CTRL_ENABLE_CRC (1 << 20)
+#define DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME (1 << 21)
+
+/*
+ * Descriptor timestamp field.
+ */
+#define STATUS_DONE (1 << 31)
+
+#define DPDMA_FRAG_MAX_SZ (4096)
+
+enum DPDMABurstType {
+ DPDMA_INCR = 0,
+ DPDMA_FIXED = 1
+};
+
+enum DPDMAMode {
+ DPDMA_CONTIGOUS = 0,
+ DPDMA_FRAGMENTED = 1
+};
+
+struct DPDMADescriptor {
+ uint32_t control;
+ uint32_t descriptor_id;
+ /* transfer size in bytes. */
+ uint32_t xfer_size;
+ uint32_t line_size_stride;
+ uint32_t timestamp_lsb;
+ uint32_t timestamp_msb;
+ /* contains address extension bits for both descriptor and source. */
+ uint32_t address_extension;
+ uint32_t next_descriptor;
+ uint32_t source_address;
+ uint32_t address_extension_23;
+ uint32_t address_extension_45;
+ uint32_t source_address2;
+ uint32_t source_address3;
+ uint32_t source_address4;
+ uint32_t source_address5;
+ uint32_t crc;
+};
+
+typedef enum DPDMABurstType DPDMABurstType;
+typedef enum DPDMAMode DPDMAMode;
+typedef struct DPDMADescriptor DPDMADescriptor;
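+
+/*
+ * Example of the line_size_stride packing (see the getters below; values
+ * chosen for illustration): bits [17:0] hold the line size in bytes and
+ * bits [31:18] the stride in 16-byte units, so
+ * line_size_stride = (64 << 18) | 1024 describes 1024-byte lines placed
+ * 64 * 16 = 1024 bytes apart.
+ */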
+
+static bool xlnx_dpdma_desc_is_last(DPDMADescriptor *desc)
+{
+ return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0);
+}
+
+static bool xlnx_dpdma_desc_is_last_of_frame(DPDMADescriptor *desc)
+{
+ return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0);
+}
+
+static uint64_t xlnx_dpdma_desc_get_source_address(DPDMADescriptor *desc,
+ uint8_t frag)
+{
+ uint64_t addr = 0;
+ assert(frag < 5);
+
+ switch (frag) {
+ case 0:
+ addr = desc->source_address
+ + (extract32(desc->address_extension, 16, 12) << 20);
+ break;
+ case 1:
+ addr = desc->source_address2
+ + (extract32(desc->address_extension_23, 0, 12) << 8);
+ break;
+ case 2:
+ addr = desc->source_address3
+ + (extract32(desc->address_extension_23, 16, 12) << 20);
+ break;
+ case 3:
+ addr = desc->source_address4
+ + (extract32(desc->address_extension_45, 0, 12) << 8);
+ break;
+ case 4:
+ addr = desc->source_address5
+ + (extract32(desc->address_extension_45, 16, 12) << 20);
+ break;
+ default:
+ addr = 0;
+ break;
+ }
+
+ return addr;
+}
+
+static uint32_t xlnx_dpdma_desc_get_transfer_size(DPDMADescriptor *desc)
+{
+ return desc->xfer_size;
+}
+
+static uint32_t xlnx_dpdma_desc_get_line_size(DPDMADescriptor *desc)
+{
+ return extract32(desc->line_size_stride, 0, 18);
+}
+
+static uint32_t xlnx_dpdma_desc_get_line_stride(DPDMADescriptor *desc)
+{
+ return extract32(desc->line_size_stride, 18, 14) * 16;
+}
+
+static inline bool xlnx_dpdma_desc_crc_enabled(DPDMADescriptor *desc)
+{
+ return (desc->control & DSCR_CTRL_ENABLE_CRC) != 0;
+}
+
+static inline bool xlnx_dpdma_desc_check_crc(DPDMADescriptor *desc)
+{
+ uint32_t *p = (uint32_t *)desc;
+ uint32_t crc = 0;
+ uint8_t i;
+
+ /*
+ * The CRC is calculated over the whole descriptor except the last
+ * 32-bit word, using 32-bit addition.
+ */
+ for (i = 0; i < 15; i++) {
+ crc += p[i];
+ }
+
+ return crc == desc->crc;
+}
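+
+/*
+ * Example: a descriptor whose first 15 words are each 0x00000001 must
+ * carry crc == 15; the additive checksum simply wraps modulo 2^32 via
+ * the natural uint32_t overflow.
+ */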
+
+static inline bool xlnx_dpdma_desc_completion_interrupt(DPDMADescriptor *desc)
+{
+ return (desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0;
+}
+
+static inline bool xlnx_dpdma_desc_is_valid(DPDMADescriptor *desc)
+{
+ return (desc->control & DSCR_CTRL_PREAMBLE) == CONTROL_PREAMBLE_VALUE;
+}
+
+static inline bool xlnx_dpdma_desc_is_contiguous(DPDMADescriptor *desc)
+{
+ return (desc->control & DSCR_CTRL_DESCRIPTOR_MODE) == 0;
+}
+
+static inline bool xlnx_dpdma_desc_update_enabled(DPDMADescriptor *desc)
+{
+ return (desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0;
+}
+
+static inline void xlnx_dpdma_desc_set_done(DPDMADescriptor *desc)
+{
+ desc->timestamp_msb |= STATUS_DONE;
+}
+
+static inline bool xlnx_dpdma_desc_is_already_done(DPDMADescriptor *desc)
+{
+ return (desc->timestamp_msb & STATUS_DONE) != 0;
+}
+
+static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc)
+{
+ return (desc->control & DSCR_CTRL_IGNORE_DONE) != 0;
+}
+
+static const VMStateDescription vmstate_xlnx_dpdma = {
+ .name = TYPE_XLNX_DPDMA,
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState,
+ XLNX_DPDMA_REG_ARRAY_SIZE),
+ VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xlnx_dpdma_update_irq(XlnxDPDMAState *s)
+{
+ bool flags;
+
+ flags = ((s->registers[DPDMA_ISR] & (~s->registers[DPDMA_IMR]))
+ || (s->registers[DPDMA_EISR] & (~s->registers[DPDMA_EIMR])));
+ qemu_set_irq(s->irq, flags);
+}
+
+static uint64_t xlnx_dpdma_descriptor_start_address(XlnxDPDMAState *s,
+ uint8_t channel)
+{
+ return ((uint64_t)s->registers[DPDMA_DSCR_STRT_ADDRE_CH(channel)] << 32)
+ + s->registers[DPDMA_DSCR_STRT_ADDR_CH(channel)];
+}
+
+static uint64_t xlnx_dpdma_descriptor_next_address(XlnxDPDMAState *s,
+ uint8_t channel)
+{
+ return ((uint64_t)s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] << 32)
+ + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)];
+}
+
+static bool xlnx_dpdma_is_channel_enabled(XlnxDPDMAState *s,
+ uint8_t channel)
+{
+ return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_EN) != 0;
+}
+
+static bool xlnx_dpdma_is_channel_paused(XlnxDPDMAState *s,
+ uint8_t channel)
+{
+ return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_PAUSED) != 0;
+}
+
+static inline bool xlnx_dpdma_is_channel_retriggered(XlnxDPDMAState *s,
+ uint8_t channel)
+{
+ /* Clear the retriggered bit after reading it. */
+ bool channel_is_retriggered = s->registers[DPDMA_GBL]
+ & DPDMA_GBL_RTRG_CH(channel);
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_RTRG_CH(channel);
+ return channel_is_retriggered;
+}
+
+static inline bool xlnx_dpdma_is_channel_triggered(XlnxDPDMAState *s,
+ uint8_t channel)
+{
+ return s->registers[DPDMA_GBL] & DPDMA_GBL_TRG_CH(channel);
+}
+
+static void xlnx_dpdma_update_desc_info(XlnxDPDMAState *s, uint8_t channel,
+ DPDMADescriptor *desc)
+{
+ s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] =
+ extract32(desc->address_extension, 0, 16);
+ s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)] = desc->next_descriptor;
+ s->registers[DPDMA_PYLD_CUR_ADDRE_CH(channel)] =
+ extract32(desc->address_extension, 16, 16);
+ s->registers[DPDMA_PYLD_CUR_ADDR_CH(channel)] = desc->source_address;
+ s->registers[DPDMA_VDO_CH(channel)] =
+ extract32(desc->line_size_stride, 18, 14)
+ + (extract32(desc->line_size_stride, 0, 18)
+ << 14);
+ s->registers[DPDMA_PYLD_SZ_CH(channel)] = desc->xfer_size;
+ s->registers[DPDMA_DSCR_ID_CH(channel)] = desc->descriptor_id;
+
+ /* Compute the status register with the descriptor information. */
+ s->registers[DPDMA_STATUS_CH(channel)] =
+ extract32(desc->control, 0, 8) << 13;
+ if ((desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_INTR;
+ }
+ if ((desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_UP;
+ }
+ if ((desc->timestamp_msb & STATUS_DONE) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_DSCR_DONE;
+ }
+ if ((desc->control & DSCR_CTRL_IGNORE_DONE) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_IGNR_DONE;
+ }
+ if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LDSCR_FRAME;
+ }
+ if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LAST_DSCR;
+ }
+ if ((desc->control & DSCR_CTRL_ENABLE_CRC) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_CRC;
+ }
+ if ((desc->control & DSCR_CTRL_DESCRIPTOR_MODE) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_MODE;
+ }
+ if ((desc->control & DSCR_CTRL_AXI_BURST_TYPE) != 0) {
+ s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_BURST_TYPE;
+ }
+}
+
+static void xlnx_dpdma_dump_descriptor(DPDMADescriptor *desc)
+{
+ if (DEBUG_DPDMA) {
+ qemu_log("DUMP DESCRIPTOR:\n");
+ qemu_hexdump(stdout, "", desc, sizeof(DPDMADescriptor));
+ }
+}
+
+static uint64_t xlnx_dpdma_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ XlnxDPDMAState *s = XLNX_DPDMA(opaque);
+
+ DPRINTF("read @%" HWADDR_PRIx "\n", offset);
+ offset = offset >> 2;
+
+ switch (offset) {
+ /*
+ * Trying to read a write only register.
+ */
+ case DPDMA_GBL:
+ return 0;
+ default:
+ assert(offset <= (0xFFC >> 2));
+ return s->registers[offset];
+ }
+ return 0;
+}
+
+static void xlnx_dpdma_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ XlnxDPDMAState *s = XLNX_DPDMA(opaque);
+
+ DPRINTF("write @%" HWADDR_PRIx " = %" PRIx64 "\n", offset, value);
+ offset = offset >> 2;
+
+ switch (offset) {
+ case DPDMA_ISR:
+ s->registers[DPDMA_ISR] &= ~value;
+ xlnx_dpdma_update_irq(s);
+ break;
+ case DPDMA_IEN:
+ s->registers[DPDMA_IMR] &= ~value;
+ break;
+ case DPDMA_IDS:
+ s->registers[DPDMA_IMR] |= value;
+ break;
+ case DPDMA_EISR:
+ s->registers[DPDMA_EISR] &= ~value;
+ xlnx_dpdma_update_irq(s);
+ break;
+ case DPDMA_EIEN:
+ s->registers[DPDMA_EIMR] &= ~value;
+ break;
+ case DPDMA_EIDS:
+ s->registers[DPDMA_EIMR] |= value;
+ break;
+ case DPDMA_IMR:
+ case DPDMA_EIMR:
+ case DPDMA_DSCR_NEXT_ADDRE_CH(0):
+ case DPDMA_DSCR_NEXT_ADDRE_CH(1):
+ case DPDMA_DSCR_NEXT_ADDRE_CH(2):
+ case DPDMA_DSCR_NEXT_ADDRE_CH(3):
+ case DPDMA_DSCR_NEXT_ADDRE_CH(4):
+ case DPDMA_DSCR_NEXT_ADDRE_CH(5):
+ case DPDMA_DSCR_NEXT_ADDR_CH(0):
+ case DPDMA_DSCR_NEXT_ADDR_CH(1):
+ case DPDMA_DSCR_NEXT_ADDR_CH(2):
+ case DPDMA_DSCR_NEXT_ADDR_CH(3):
+ case DPDMA_DSCR_NEXT_ADDR_CH(4):
+ case DPDMA_DSCR_NEXT_ADDR_CH(5):
+ case DPDMA_PYLD_CUR_ADDRE_CH(0):
+ case DPDMA_PYLD_CUR_ADDRE_CH(1):
+ case DPDMA_PYLD_CUR_ADDRE_CH(2):
+ case DPDMA_PYLD_CUR_ADDRE_CH(3):
+ case DPDMA_PYLD_CUR_ADDRE_CH(4):
+ case DPDMA_PYLD_CUR_ADDRE_CH(5):
+ case DPDMA_PYLD_CUR_ADDR_CH(0):
+ case DPDMA_PYLD_CUR_ADDR_CH(1):
+ case DPDMA_PYLD_CUR_ADDR_CH(2):
+ case DPDMA_PYLD_CUR_ADDR_CH(3):
+ case DPDMA_PYLD_CUR_ADDR_CH(4):
+ case DPDMA_PYLD_CUR_ADDR_CH(5):
+ case DPDMA_STATUS_CH(0):
+ case DPDMA_STATUS_CH(1):
+ case DPDMA_STATUS_CH(2):
+ case DPDMA_STATUS_CH(3):
+ case DPDMA_STATUS_CH(4):
+ case DPDMA_STATUS_CH(5):
+ case DPDMA_VDO_CH(0):
+ case DPDMA_VDO_CH(1):
+ case DPDMA_VDO_CH(2):
+ case DPDMA_VDO_CH(3):
+ case DPDMA_VDO_CH(4):
+ case DPDMA_VDO_CH(5):
+ case DPDMA_PYLD_SZ_CH(0):
+ case DPDMA_PYLD_SZ_CH(1):
+ case DPDMA_PYLD_SZ_CH(2):
+ case DPDMA_PYLD_SZ_CH(3):
+ case DPDMA_PYLD_SZ_CH(4):
+ case DPDMA_PYLD_SZ_CH(5):
+ case DPDMA_DSCR_ID_CH(0):
+ case DPDMA_DSCR_ID_CH(1):
+ case DPDMA_DSCR_ID_CH(2):
+ case DPDMA_DSCR_ID_CH(3):
+ case DPDMA_DSCR_ID_CH(4):
+ case DPDMA_DSCR_ID_CH(5):
+ /*
+ * Trying to write to a read-only register.
+ */
+ break;
+ case DPDMA_GBL:
+ /*
+ * This is a write-only register, so it reads as zero in the read
+ * callback.
+ * We store the value anyway so that we know whether a channel has
+ * been triggered or retriggered.
+ */
+ s->registers[offset] |= value & 0x00000FFF;
+ break;
+ case DPDMA_DSCR_STRT_ADDRE_CH(0):
+ case DPDMA_DSCR_STRT_ADDRE_CH(1):
+ case DPDMA_DSCR_STRT_ADDRE_CH(2):
+ case DPDMA_DSCR_STRT_ADDRE_CH(3):
+ case DPDMA_DSCR_STRT_ADDRE_CH(4):
+ case DPDMA_DSCR_STRT_ADDRE_CH(5):
+ value &= 0x0000FFFF;
+ s->registers[offset] = value;
+ break;
+ case DPDMA_CNTL_CH(0):
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(0);
+ value &= 0x3FFFFFFF;
+ s->registers[offset] = value;
+ break;
+ case DPDMA_CNTL_CH(1):
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(1);
+ value &= 0x3FFFFFFF;
+ s->registers[offset] = value;
+ break;
+ case DPDMA_CNTL_CH(2):
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(2);
+ value &= 0x3FFFFFFF;
+ s->registers[offset] = value;
+ break;
+ case DPDMA_CNTL_CH(3):
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(3);
+ value &= 0x3FFFFFFF;
+ s->registers[offset] = value;
+ break;
+ case DPDMA_CNTL_CH(4):
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(4);
+ value &= 0x3FFFFFFF;
+ s->registers[offset] = value;
+ break;
+ case DPDMA_CNTL_CH(5):
+ s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(5);
+ value &= 0x3FFFFFFF;
+ s->registers[offset] = value;
+ break;
+ default:
+ assert(offset <= (0xFFC >> 2));
+ s->registers[offset] = value;
+ break;
+ }
+}
+
+static const MemoryRegionOps dma_ops = {
+ .read = xlnx_dpdma_read,
+ .write = xlnx_dpdma_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void xlnx_dpdma_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ XlnxDPDMAState *s = XLNX_DPDMA(obj);
+
+ memory_region_init_io(&s->iomem, obj, &dma_ops, s,
+ TYPE_XLNX_DPDMA, 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq);
+}
+
+static void xlnx_dpdma_reset(DeviceState *dev)
+{
+ XlnxDPDMAState *s = XLNX_DPDMA(dev);
+ size_t i;
+
+ memset(s->registers, 0, sizeof(s->registers));
+ s->registers[DPDMA_IMR] = 0x07FFFFFF;
+ s->registers[DPDMA_EIMR] = 0xFFFFFFFF;
+ s->registers[DPDMA_ALC0_MIN] = 0x0000FFFF;
+ s->registers[DPDMA_ALC1_MIN] = 0x0000FFFF;
+
+ for (i = 0; i < 6; i++) {
+ s->data[i] = NULL;
+ s->operation_finished[i] = true;
+ }
+}
+
+static void xlnx_dpdma_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->vmsd = &vmstate_xlnx_dpdma;
+ dc->reset = xlnx_dpdma_reset;
+}
+
+static const TypeInfo xlnx_dpdma_info = {
+ .name = TYPE_XLNX_DPDMA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxDPDMAState),
+ .instance_init = xlnx_dpdma_init,
+ .class_init = xlnx_dpdma_class_init,
+};
+
+static void xlnx_dpdma_register_types(void)
+{
+ type_register_static(&xlnx_dpdma_info);
+}
+
+size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
+ bool one_desc)
+{
+ uint64_t desc_addr;
+ uint64_t source_addr[6];
+ DPDMADescriptor desc;
+ bool done = false;
+ size_t ptr = 0;
+
+ assert(channel <= 5);
+
+ DPRINTF("start dpdma channel 0x%" PRIX8 "\n", channel);
+
+ if (!xlnx_dpdma_is_channel_triggered(s, channel)) {
+ DPRINTF("Channel isn't triggered..\n");
+ return 0;
+ }
+
+ if (!xlnx_dpdma_is_channel_enabled(s, channel)) {
+ DPRINTF("Channel isn't enabled..\n");
+ return 0;
+ }
+
+ if (xlnx_dpdma_is_channel_paused(s, channel)) {
+ DPRINTF("Channel is paused..\n");
+ return 0;
+ }
+
+ do {
+ if ((s->operation_finished[channel])
+ || xlnx_dpdma_is_channel_retriggered(s, channel)) {
+ desc_addr = xlnx_dpdma_descriptor_start_address(s, channel);
+ s->operation_finished[channel] = false;
+ } else {
+ desc_addr = xlnx_dpdma_descriptor_next_address(s, channel);
+ }
+
+ if (dma_memory_read(&address_space_memory, desc_addr, &desc,
+ sizeof(DPDMADescriptor))) {
+ s->registers[DPDMA_EISR] |= ((1 << 1) << channel);
+ xlnx_dpdma_update_irq(s);
+ s->operation_finished[channel] = true;
+ DPRINTF("Can't get the descriptor.\n");
+ break;
+ }
+
+ xlnx_dpdma_update_desc_info(s, channel, &desc);
+
+#ifdef DEBUG_DPDMA
+ xlnx_dpdma_dump_descriptor(&desc);
+#endif
+
+ DPRINTF("location of the descriptor: %" PRIx64 "\n", desc_addr);
+ if (!xlnx_dpdma_desc_is_valid(&desc)) {
+ s->registers[DPDMA_EISR] |= ((1 << 7) << channel);
+ xlnx_dpdma_update_irq(s);
+ s->operation_finished[channel] = true;
+ DPRINTF("Invalid descriptor..\n");
+ break;
+ }
+
+ if (xlnx_dpdma_desc_crc_enabled(&desc)
+ && !xlnx_dpdma_desc_check_crc(&desc)) {
+ s->registers[DPDMA_EISR] |= ((1 << 13) << channel);
+ xlnx_dpdma_update_irq(s);
+ s->operation_finished[channel] = true;
+ DPRINTF("Bad CRC for descriptor..\n");
+ break;
+ }
+
+ if (xlnx_dpdma_desc_is_already_done(&desc)
+ && !xlnx_dpdma_desc_ignore_done_bit(&desc)) {
+ /* We are trying to process an already processed descriptor. */
+ s->registers[DPDMA_EISR] |= ((1 << 25) << channel);
+ xlnx_dpdma_update_irq(s);
+ s->operation_finished[channel] = true;
+ DPRINTF("Already processed descriptor..\n");
+ break;
+ }
+
+ done = xlnx_dpdma_desc_is_last(&desc)
+ || xlnx_dpdma_desc_is_last_of_frame(&desc);
+
+ s->operation_finished[channel] = done;
+ if (s->data[channel]) {
+ int64_t transfer_len = xlnx_dpdma_desc_get_transfer_size(&desc);
+ uint32_t line_size = xlnx_dpdma_desc_get_line_size(&desc);
+ uint32_t line_stride = xlnx_dpdma_desc_get_line_stride(&desc);
+ if (xlnx_dpdma_desc_is_contiguous(&desc)) {
+ source_addr[0] = xlnx_dpdma_desc_get_source_address(&desc, 0);
+ while (transfer_len != 0) {
+ if (dma_memory_read(&address_space_memory,
+ source_addr[0],
+ &s->data[channel][ptr],
+ line_size)) {
+ s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
+ xlnx_dpdma_update_irq(s);
+ DPRINTF("Can't get data.\n");
+ break;
+ }
+ ptr += line_size;
+ transfer_len -= line_size;
+ source_addr[0] += line_stride;
+ }
+ } else {
+ DPRINTF("Source address:\n");
+ int frag;
+ for (frag = 0; frag < 5; frag++) {
+ source_addr[frag] =
+ xlnx_dpdma_desc_get_source_address(&desc, frag);
+ DPRINTF("Fragment %u: %" PRIx64 "\n", frag + 1,
+ source_addr[frag]);
+ }
+
+ frag = 0;
+ while ((transfer_len > 0) && (frag < 5)) {
+ size_t fragment_len = DPDMA_FRAG_MAX_SZ
+ - (source_addr[frag] % DPDMA_FRAG_MAX_SZ);
+
+ if (dma_memory_read(&address_space_memory,
+ source_addr[frag],
+ &(s->data[channel][ptr]),
+ fragment_len)) {
+ s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
+ xlnx_dpdma_update_irq(s);
+ DPRINTF("Can't get data.\n");
+ break;
+ }
+ ptr += fragment_len;
+ transfer_len -= fragment_len;
+ frag += 1;
+ }
+ }
+ }
+
+ if (xlnx_dpdma_desc_update_enabled(&desc)) {
+ /* The descriptor needs to be updated when it is completed. */
+ DPRINTF("update the descriptor with the done flag set.\n");
+ xlnx_dpdma_desc_set_done(&desc);
+ dma_memory_write(&address_space_memory, desc_addr, &desc,
+ sizeof(DPDMADescriptor));
+ }
+
+ if (xlnx_dpdma_desc_completion_interrupt(&desc)) {
+ DPRINTF("completion interrupt enabled!\n");
+ s->registers[DPDMA_ISR] |= (1 << channel);
+ xlnx_dpdma_update_irq(s);
+ }
+
+ } while (!done && !one_desc);
+
+ return ptr;
+}
+
+void xlnx_dpdma_set_host_data_location(XlnxDPDMAState *s, uint8_t channel,
+ void *p)
+{
+ if (!s) {
+ qemu_log_mask(LOG_UNIMP, "DPDMA client not attached to valid DPDMA"
+ " instance\n");
+ return;
+ }
+
+ assert(channel <= 5);
+ s->data[channel] = p;
+}
+
+void xlnx_dpdma_trigger_vsync_irq(XlnxDPDMAState *s)
+{
+ s->registers[DPDMA_ISR] |= (1 << 27);
+ xlnx_dpdma_update_irq(s);
+}
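+
+/*
+ * A minimal client sketch for the three public entry points above (the
+ * "s->dpdma" link, "channel" and "fb_data" names are illustrative
+ * assumptions; the in-tree DisplayPort model is the real user):
+ *
+ *     xlnx_dpdma_set_host_data_location(s->dpdma, channel, fb_data);
+ *     if (xlnx_dpdma_start_operation(s->dpdma, channel, false) == 0) {
+ *         (0 bytes transferred: channel disabled, paused or untriggered)
+ *     }
+ *     xlnx_dpdma_trigger_vsync_irq(s->dpdma);
+ */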
+
+type_init(xlnx_dpdma_register_types)