author    Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>    2023-10-10 11:40:56 +0000
committer Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>    2023-10-10 11:40:56 +0000
commit    e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree      aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /hw/ide
parent    cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:
Epsilon release introduces a new compatibility layer which makes the virtio-loopback design work with QEMU and the rust-vmm vhost-user backend without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'hw/ide')
-rw-r--r--  hw/ide/Kconfig             59
-rw-r--r--  hw/ide/ahci-allwinner.c   127
-rw-r--r--  hw/ide/ahci.c            1843
-rw-r--r--  hw/ide/ahci_internal.h    393
-rw-r--r--  hw/ide/atapi.c           1377
-rw-r--r--  hw/ide/cmd646.c           351
-rw-r--r--  hw/ide/core.c            2946
-rw-r--r--  hw/ide/ich.c              197
-rw-r--r--  hw/ide/ioport.c            68
-rw-r--r--  hw/ide/isa.c              138
-rw-r--r--  hw/ide/macio.c            513
-rw-r--r--  hw/ide/meson.build         14
-rw-r--r--  hw/ide/microdrive.c       643
-rw-r--r--  hw/ide/mmio.c             189
-rw-r--r--  hw/ide/pci.c              534
-rw-r--r--  hw/ide/piix.c             279
-rw-r--r--  hw/ide/qdev.c             371
-rw-r--r--  hw/ide/sii3112.c          322
-rw-r--r--  hw/ide/trace-events       124
-rw-r--r--  hw/ide/trace.h              1
-rw-r--r--  hw/ide/via.c              243
21 files changed, 10732 insertions, 0 deletions
diff --git a/hw/ide/Kconfig b/hw/ide/Kconfig
new file mode 100644
index 000000000..dd85fa361
--- /dev/null
+++ b/hw/ide/Kconfig
@@ -0,0 +1,59 @@
+config IDE_CORE
+ bool
+
+config IDE_QDEV
+ bool
+ select IDE_CORE
+
+config IDE_PCI
+ bool
+ depends on PCI
+ select IDE_QDEV
+
+config IDE_ISA
+ bool
+ depends on ISA_BUS
+ select IDE_QDEV
+
+config IDE_PIIX
+ bool
+ select IDE_PCI
+ select IDE_QDEV
+
+config IDE_CMD646
+ bool
+ select IDE_PCI
+ select IDE_QDEV
+
+config IDE_MACIO
+ bool
+ select IDE_QDEV
+
+config IDE_MMIO
+ bool
+ select IDE_QDEV
+
+config IDE_VIA
+ bool
+ select IDE_PCI
+ select IDE_QDEV
+
+config MICRODRIVE
+ bool
+ select IDE_QDEV
+ depends on PCMCIA
+
+config AHCI
+ bool
+ select IDE_QDEV
+
+config AHCI_ICH9
+ bool
+ default y if PCI_DEVICES
+ depends on PCI
+ select AHCI
+
+config IDE_SII3112
+ bool
+ select IDE_PCI
+ select IDE_QDEV
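The Kconfig symbols above only control what gets compiled in: selecting a controller such as AHCI pulls IDE_QDEV and IDE_CORE in through the select chain. A board still instantiates the controller at run time. Below is a minimal sketch of how a hypothetical sysbus board could do that; the function name, base address and IRQ are placeholders, while TYPE_SYSBUS_AHCI comes from the ahci.c added further down and sysbus_create_simple() is QEMU's generic helper that realizes a sysbus device, maps its MMIO region 0 and connects its IRQ line.

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/ide/ahci.h"

/* Hypothetical board code: create a "sysbus-ahci" controller, map its
 * register block at 'base' and wire its interrupt to 'irq'. */
static void example_board_add_ahci(hwaddr base, qemu_irq irq)
{
    sysbus_create_simple(TYPE_SYSBUS_AHCI, base, irq);
}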
diff --git a/hw/ide/ahci-allwinner.c b/hw/ide/ahci-allwinner.c
new file mode 100644
index 000000000..227e747ba
--- /dev/null
+++ b/hw/ide/ahci-allwinner.c
@@ -0,0 +1,127 @@
+/*
+ * QEMU Allwinner AHCI Emulation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+#include "hw/ide/internal.h"
+#include "migration/vmstate.h"
+#include "ahci_internal.h"
+
+#include "trace.h"
+
+#define ALLWINNER_AHCI_BISTAFR ((0xa0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTCR ((0xa4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTFCTR ((0xa8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTSR ((0xac - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTDECR ((0xb0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_DIAGNR0 ((0xb4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_DIAGNR1 ((0xb8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_OOBR ((0xbc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS0R ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS1R ((0xc4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS2R ((0xc8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_TIMER1MS ((0xe0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_GPARAM1R ((0xe8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_GPARAM2R ((0xec - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PPARAMR ((0xf0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_TESTR ((0xf4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_VERSIONR ((0xf8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_IDR ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_RWCR ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+
+static uint64_t allwinner_ahci_mem_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ AllwinnerAHCIState *a = opaque;
+ AHCIState *s = &(SYSBUS_AHCI(a)->ahci);
+ uint64_t val = a->regs[addr / 4];
+
+ switch (addr / 4) {
+ case ALLWINNER_AHCI_PHYCS0R:
+ val |= 0x2 << 28;
+ break;
+ case ALLWINNER_AHCI_PHYCS2R:
+ val &= ~(0x1 << 24);
+ break;
+ }
+ trace_allwinner_ahci_mem_read(s, a, addr, val, size);
+ return val;
+}
+
+static void allwinner_ahci_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ AllwinnerAHCIState *a = opaque;
+ AHCIState *s = &(SYSBUS_AHCI(a)->ahci);
+
+ trace_allwinner_ahci_mem_write(s, a, addr, val, size);
+ a->regs[addr / 4] = val;
+}
+
+static const MemoryRegionOps allwinner_ahci_mem_ops = {
+ .read = allwinner_ahci_mem_read,
+ .write = allwinner_ahci_mem_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void allwinner_ahci_init(Object *obj)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(obj);
+ AllwinnerAHCIState *a = ALLWINNER_AHCI(obj);
+
+ memory_region_init_io(&a->mmio, obj, &allwinner_ahci_mem_ops, a,
+ "allwinner-ahci", ALLWINNER_AHCI_MMIO_SIZE);
+ memory_region_add_subregion(&s->ahci.mem, ALLWINNER_AHCI_MMIO_OFF,
+ &a->mmio);
+}
+
+static const VMStateDescription vmstate_allwinner_ahci = {
+ .name = "allwinner-ahci",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState,
+ ALLWINNER_AHCI_MMIO_SIZE / 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void allwinner_ahci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_allwinner_ahci;
+}
+
+static const TypeInfo allwinner_ahci_info = {
+ .name = TYPE_ALLWINNER_AHCI,
+ .parent = TYPE_SYSBUS_AHCI,
+ .instance_size = sizeof(AllwinnerAHCIState),
+ .instance_init = allwinner_ahci_init,
+ .class_init = allwinner_ahci_class_init,
+};
+
+static void sysbus_ahci_register_types(void)
+{
+ type_register_static(&allwinner_ahci_info);
+}
+
+type_init(sysbus_ahci_register_types)
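The ALLWINNER_AHCI_* macros above are word indices into the flat regs[] array of AllwinnerAHCIState: the vendor window is added as a subregion at ALLWINNER_AHCI_MMIO_OFF, so allwinner_ahci_mem_read()/write() see offsets relative to that window and index with addr / 4. A standalone sketch of the arithmetic follows; ALLWINNER_AHCI_MMIO_OFF is assumed to be 0x80, as defined in the public hw/ide/ahci.h header, which is not part of this diff.

#include <assert.h>
#include <stdint.h>

/* Assumed value from include/hw/ide/ahci.h (not shown in this patch). */
#define ALLWINNER_AHCI_MMIO_OFF 0x80
/* Same definition as in ahci-allwinner.c above. */
#define ALLWINNER_AHCI_PHYCS0R ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4)

int main(void)
{
    /* A guest read of controller offset 0xc0 lands in the vendor subregion
     * at 0xc0 - 0x80 = 0x40; allwinner_ahci_mem_read() divides by 4 and
     * fetches regs[ALLWINNER_AHCI_PHYCS0R] (index 0x10). */
    uint64_t addr = 0xc0 - ALLWINNER_AHCI_MMIO_OFF;
    assert(addr / 4 == ALLWINNER_AHCI_PHYCS0R);
    assert(ALLWINNER_AHCI_PHYCS0R == 0x10);
    return 0;
}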
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
new file mode 100644
index 000000000..a94c6e26f
--- /dev/null
+++ b/hw/ide/ahci.c
@@ -0,0 +1,1843 @@
+/*
+ * QEMU AHCI Emulation
+ *
+ * Copyright (c) 2010 qiaochong@loongson.cn
+ * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
+ * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
+ * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/pci.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/dma.h"
+#include "hw/ide/internal.h"
+#include "hw/ide/pci.h"
+#include "ahci_internal.h"
+
+#include "trace.h"
+
+static void check_cmd(AHCIState *s, int port);
+static int handle_cmd(AHCIState *s, int port, uint8_t slot);
+static void ahci_reset_port(AHCIState *s, int port);
+static bool ahci_write_fis_d2h(AHCIDevice *ad);
+static void ahci_init_d2h(AHCIDevice *ad);
+static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit);
+static bool ahci_map_clb_address(AHCIDevice *ad);
+static bool ahci_map_fis_address(AHCIDevice *ad);
+static void ahci_unmap_clb_address(AHCIDevice *ad);
+static void ahci_unmap_fis_address(AHCIDevice *ad);
+
+static const char *AHCIHostReg_lookup[AHCI_HOST_REG__COUNT] = {
+ [AHCI_HOST_REG_CAP] = "CAP",
+ [AHCI_HOST_REG_CTL] = "GHC",
+ [AHCI_HOST_REG_IRQ_STAT] = "IS",
+ [AHCI_HOST_REG_PORTS_IMPL] = "PI",
+ [AHCI_HOST_REG_VERSION] = "VS",
+ [AHCI_HOST_REG_CCC_CTL] = "CCC_CTL",
+ [AHCI_HOST_REG_CCC_PORTS] = "CCC_PORTS",
+ [AHCI_HOST_REG_EM_LOC] = "EM_LOC",
+ [AHCI_HOST_REG_EM_CTL] = "EM_CTL",
+ [AHCI_HOST_REG_CAP2] = "CAP2",
+ [AHCI_HOST_REG_BOHC] = "BOHC",
+};
+
+static const char *AHCIPortReg_lookup[AHCI_PORT_REG__COUNT] = {
+ [AHCI_PORT_REG_LST_ADDR] = "PxCLB",
+ [AHCI_PORT_REG_LST_ADDR_HI] = "PxCLBU",
+ [AHCI_PORT_REG_FIS_ADDR] = "PxFB",
+ [AHCI_PORT_REG_FIS_ADDR_HI] = "PxFBU",
+ [AHCI_PORT_REG_IRQ_STAT] = "PxIS",
+    [AHCI_PORT_REG_IRQ_MASK] = "PxIE",
+ [AHCI_PORT_REG_CMD] = "PxCMD",
+ [7] = "Reserved",
+ [AHCI_PORT_REG_TFDATA] = "PxTFD",
+ [AHCI_PORT_REG_SIG] = "PxSIG",
+ [AHCI_PORT_REG_SCR_STAT] = "PxSSTS",
+ [AHCI_PORT_REG_SCR_CTL] = "PxSCTL",
+ [AHCI_PORT_REG_SCR_ERR] = "PxSERR",
+ [AHCI_PORT_REG_SCR_ACT] = "PxSACT",
+ [AHCI_PORT_REG_CMD_ISSUE] = "PxCI",
+ [AHCI_PORT_REG_SCR_NOTIF] = "PxSNTF",
+ [AHCI_PORT_REG_FIS_CTL] = "PxFBS",
+ [AHCI_PORT_REG_DEV_SLEEP] = "PxDEVSLP",
+ [18 ... 27] = "Reserved",
+ [AHCI_PORT_REG_VENDOR_1 ...
+ AHCI_PORT_REG_VENDOR_4] = "PxVS",
+};
+
+static const char *AHCIPortIRQ_lookup[AHCI_PORT_IRQ__COUNT] = {
+ [AHCI_PORT_IRQ_BIT_DHRS] = "DHRS",
+ [AHCI_PORT_IRQ_BIT_PSS] = "PSS",
+ [AHCI_PORT_IRQ_BIT_DSS] = "DSS",
+ [AHCI_PORT_IRQ_BIT_SDBS] = "SDBS",
+ [AHCI_PORT_IRQ_BIT_UFS] = "UFS",
+ [AHCI_PORT_IRQ_BIT_DPS] = "DPS",
+ [AHCI_PORT_IRQ_BIT_PCS] = "PCS",
+ [AHCI_PORT_IRQ_BIT_DMPS] = "DMPS",
+ [8 ... 21] = "RESERVED",
+ [AHCI_PORT_IRQ_BIT_PRCS] = "PRCS",
+ [AHCI_PORT_IRQ_BIT_IPMS] = "IPMS",
+ [AHCI_PORT_IRQ_BIT_OFS] = "OFS",
+ [25] = "RESERVED",
+ [AHCI_PORT_IRQ_BIT_INFS] = "INFS",
+ [AHCI_PORT_IRQ_BIT_IFS] = "IFS",
+ [AHCI_PORT_IRQ_BIT_HBDS] = "HBDS",
+ [AHCI_PORT_IRQ_BIT_HBFS] = "HBFS",
+ [AHCI_PORT_IRQ_BIT_TFES] = "TFES",
+ [AHCI_PORT_IRQ_BIT_CPDS] = "CPDS"
+};
+
+static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
+{
+ uint32_t val;
+ AHCIPortRegs *pr = &s->dev[port].port_regs;
+ enum AHCIPortReg regnum = offset / sizeof(uint32_t);
+ assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
+
+ switch (regnum) {
+ case AHCI_PORT_REG_LST_ADDR:
+ val = pr->lst_addr;
+ break;
+ case AHCI_PORT_REG_LST_ADDR_HI:
+ val = pr->lst_addr_hi;
+ break;
+ case AHCI_PORT_REG_FIS_ADDR:
+ val = pr->fis_addr;
+ break;
+ case AHCI_PORT_REG_FIS_ADDR_HI:
+ val = pr->fis_addr_hi;
+ break;
+ case AHCI_PORT_REG_IRQ_STAT:
+ val = pr->irq_stat;
+ break;
+ case AHCI_PORT_REG_IRQ_MASK:
+ val = pr->irq_mask;
+ break;
+ case AHCI_PORT_REG_CMD:
+ val = pr->cmd;
+ break;
+ case AHCI_PORT_REG_TFDATA:
+ val = pr->tfdata;
+ break;
+ case AHCI_PORT_REG_SIG:
+ val = pr->sig;
+ break;
+ case AHCI_PORT_REG_SCR_STAT:
+ if (s->dev[port].port.ifs[0].blk) {
+ val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
+ SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
+ } else {
+ val = SATA_SCR_SSTATUS_DET_NODEV;
+ }
+ break;
+ case AHCI_PORT_REG_SCR_CTL:
+ val = pr->scr_ctl;
+ break;
+ case AHCI_PORT_REG_SCR_ERR:
+ val = pr->scr_err;
+ break;
+ case AHCI_PORT_REG_SCR_ACT:
+ val = pr->scr_act;
+ break;
+ case AHCI_PORT_REG_CMD_ISSUE:
+ val = pr->cmd_issue;
+ break;
+ default:
+ trace_ahci_port_read_default(s, port, AHCIPortReg_lookup[regnum],
+ offset);
+ val = 0;
+ }
+
+ trace_ahci_port_read(s, port, AHCIPortReg_lookup[regnum], offset, val);
+ return val;
+}
+
+static void ahci_irq_raise(AHCIState *s)
+{
+ DeviceState *dev_state = s->container;
+ PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
+ TYPE_PCI_DEVICE);
+
+ trace_ahci_irq_raise(s);
+
+ if (pci_dev && msi_enabled(pci_dev)) {
+ msi_notify(pci_dev, 0);
+ } else {
+ qemu_irq_raise(s->irq);
+ }
+}
+
+static void ahci_irq_lower(AHCIState *s)
+{
+ DeviceState *dev_state = s->container;
+ PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
+ TYPE_PCI_DEVICE);
+
+ trace_ahci_irq_lower(s);
+
+ if (!pci_dev || !msi_enabled(pci_dev)) {
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static void ahci_check_irq(AHCIState *s)
+{
+ int i;
+ uint32_t old_irq = s->control_regs.irqstatus;
+
+ s->control_regs.irqstatus = 0;
+ for (i = 0; i < s->ports; i++) {
+ AHCIPortRegs *pr = &s->dev[i].port_regs;
+ if (pr->irq_stat & pr->irq_mask) {
+ s->control_regs.irqstatus |= (1 << i);
+ }
+ }
+ trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus);
+ if (s->control_regs.irqstatus &&
+ (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
+ ahci_irq_raise(s);
+ } else {
+ ahci_irq_lower(s);
+ }
+}
+
+static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
+ enum AHCIPortIRQ irqbit)
+{
+ g_assert((unsigned)irqbit < 32);
+ uint32_t irq = 1U << irqbit;
+ uint32_t irqstat = d->port_regs.irq_stat | irq;
+
+ trace_ahci_trigger_irq(s, d->port_no,
+ AHCIPortIRQ_lookup[irqbit], irq,
+ d->port_regs.irq_stat, irqstat,
+ irqstat & d->port_regs.irq_mask);
+
+ d->port_regs.irq_stat = irqstat;
+ ahci_check_irq(s);
+}
+
+static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
+ uint32_t wanted)
+{
+ hwaddr len = wanted;
+
+ if (*ptr) {
+ dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
+ }
+
+ *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
+ if (len < wanted && *ptr) {
+ dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
+ *ptr = NULL;
+ }
+}
+
+/**
+ * Check the cmd register to see if we should start or stop
+ * the DMA or FIS RX engines.
+ *
+ * @ad: Device to dis/engage.
+ *
+ * @return 0 on success, -1 on error.
+ */
+static int ahci_cond_start_engines(AHCIDevice *ad)
+{
+ AHCIPortRegs *pr = &ad->port_regs;
+ bool cmd_start = pr->cmd & PORT_CMD_START;
+ bool cmd_on = pr->cmd & PORT_CMD_LIST_ON;
+ bool fis_start = pr->cmd & PORT_CMD_FIS_RX;
+ bool fis_on = pr->cmd & PORT_CMD_FIS_ON;
+
+ if (cmd_start && !cmd_on) {
+ if (!ahci_map_clb_address(ad)) {
+ pr->cmd &= ~PORT_CMD_START;
+ error_report("AHCI: Failed to start DMA engine: "
+ "bad command list buffer address");
+ return -1;
+ }
+ } else if (!cmd_start && cmd_on) {
+ ahci_unmap_clb_address(ad);
+ }
+
+ if (fis_start && !fis_on) {
+ if (!ahci_map_fis_address(ad)) {
+ pr->cmd &= ~PORT_CMD_FIS_RX;
+ error_report("AHCI: Failed to start FIS receive engine: "
+ "bad FIS receive buffer address");
+ return -1;
+ }
+ } else if (!fis_start && fis_on) {
+ ahci_unmap_fis_address(ad);
+ }
+
+ return 0;
+}
+
+static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
+{
+ AHCIPortRegs *pr = &s->dev[port].port_regs;
+ enum AHCIPortReg regnum = offset / sizeof(uint32_t);
+ assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
+ trace_ahci_port_write(s, port, AHCIPortReg_lookup[regnum], offset, val);
+
+ switch (regnum) {
+ case AHCI_PORT_REG_LST_ADDR:
+ pr->lst_addr = val;
+ break;
+ case AHCI_PORT_REG_LST_ADDR_HI:
+ pr->lst_addr_hi = val;
+ break;
+ case AHCI_PORT_REG_FIS_ADDR:
+ pr->fis_addr = val;
+ break;
+ case AHCI_PORT_REG_FIS_ADDR_HI:
+ pr->fis_addr_hi = val;
+ break;
+ case AHCI_PORT_REG_IRQ_STAT:
+ pr->irq_stat &= ~val;
+ ahci_check_irq(s);
+ break;
+ case AHCI_PORT_REG_IRQ_MASK:
+ pr->irq_mask = val & 0xfdc000ff;
+ ahci_check_irq(s);
+ break;
+ case AHCI_PORT_REG_CMD:
+ /* Block any Read-only fields from being set;
+ * including LIST_ON and FIS_ON.
+ * The spec requires to set ICC bits to zero after the ICC change
+ * is done. We don't support ICC state changes, therefore always
+ * force the ICC bits to zero.
+ */
+ pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
+ (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK));
+
+ /* Check FIS RX and CLB engines */
+ ahci_cond_start_engines(&s->dev[port]);
+
+ /* XXX usually the FIS would be pending on the bus here and
+ issuing deferred until the OS enables FIS receival.
+ Instead, we only submit it once - which works in most
+ cases, but is a hack. */
+ if ((pr->cmd & PORT_CMD_FIS_ON) &&
+ !s->dev[port].init_d2h_sent) {
+ ahci_init_d2h(&s->dev[port]);
+ }
+
+ check_cmd(s, port);
+ break;
+ case AHCI_PORT_REG_TFDATA:
+ case AHCI_PORT_REG_SIG:
+ case AHCI_PORT_REG_SCR_STAT:
+ /* Read Only */
+ break;
+ case AHCI_PORT_REG_SCR_CTL:
+ if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
+ ((val & AHCI_SCR_SCTL_DET) == 0)) {
+ ahci_reset_port(s, port);
+ }
+ pr->scr_ctl = val;
+ break;
+ case AHCI_PORT_REG_SCR_ERR:
+ pr->scr_err &= ~val;
+ break;
+ case AHCI_PORT_REG_SCR_ACT:
+ /* RW1 */
+ pr->scr_act |= val;
+ break;
+ case AHCI_PORT_REG_CMD_ISSUE:
+ pr->cmd_issue |= val;
+ check_cmd(s, port);
+ break;
+ default:
+ trace_ahci_port_write_unimpl(s, port, AHCIPortReg_lookup[regnum],
+ offset, val);
+ qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
+ "AHCI port %d register %s, offset 0x%x: 0x%"PRIx32,
+ port, AHCIPortReg_lookup[regnum], offset, val);
+ break;
+ }
+}
+
+static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
+{
+ AHCIState *s = opaque;
+ uint32_t val = 0;
+
+ if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
+ enum AHCIHostReg regnum = addr / 4;
+ assert(regnum < AHCI_HOST_REG__COUNT);
+
+ switch (regnum) {
+ case AHCI_HOST_REG_CAP:
+ val = s->control_regs.cap;
+ break;
+ case AHCI_HOST_REG_CTL:
+ val = s->control_regs.ghc;
+ break;
+ case AHCI_HOST_REG_IRQ_STAT:
+ val = s->control_regs.irqstatus;
+ break;
+ case AHCI_HOST_REG_PORTS_IMPL:
+ val = s->control_regs.impl;
+ break;
+ case AHCI_HOST_REG_VERSION:
+ val = s->control_regs.version;
+ break;
+ default:
+ trace_ahci_mem_read_32_host_default(s, AHCIHostReg_lookup[regnum],
+ addr);
+ }
+ trace_ahci_mem_read_32_host(s, AHCIHostReg_lookup[regnum], addr, val);
+ } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
+ (addr < (AHCI_PORT_REGS_START_ADDR +
+ (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
+ val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
+ addr & AHCI_PORT_ADDR_OFFSET_MASK);
+ } else {
+ trace_ahci_mem_read_32_default(s, addr, val);
+ }
+
+ trace_ahci_mem_read_32(s, addr, val);
+ return val;
+}
+
+
+/**
+ * AHCI 1.3 section 3 ("HBA Memory Registers")
+ * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
+ * Caller is responsible for masking unwanted higher order bytes.
+ */
+static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
+{
+ hwaddr aligned = addr & ~0x3;
+ int ofst = addr - aligned;
+ uint64_t lo = ahci_mem_read_32(opaque, aligned);
+ uint64_t hi;
+ uint64_t val;
+
+ /* if < 8 byte read does not cross 4 byte boundary */
+ if (ofst + size <= 4) {
+ val = lo >> (ofst * 8);
+ } else {
+ g_assert(size > 1);
+
+ /* If the 64bit read is unaligned, we will produce undefined
+ * results. AHCI does not support unaligned 64bit reads. */
+ hi = ahci_mem_read_32(opaque, aligned + 4);
+ val = (hi << 32 | lo) >> (ofst * 8);
+ }
+
+ trace_ahci_mem_read(opaque, size, addr, val);
+ return val;
+}
+
+
+static void ahci_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ AHCIState *s = opaque;
+
+ trace_ahci_mem_write(s, size, addr, val);
+
+ /* Only aligned reads are allowed on AHCI */
+ if (addr & 3) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ahci: Mis-aligned write to addr 0x%03" HWADDR_PRIX "\n",
+ addr);
+ return;
+ }
+
+ if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
+ enum AHCIHostReg regnum = addr / 4;
+ assert(regnum < AHCI_HOST_REG__COUNT);
+
+ switch (regnum) {
+ case AHCI_HOST_REG_CAP: /* R/WO, RO */
+ /* FIXME handle R/WO */
+ break;
+ case AHCI_HOST_REG_CTL: /* R/W */
+ if (val & HOST_CTL_RESET) {
+ ahci_reset(s);
+ } else {
+ s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
+ ahci_check_irq(s);
+ }
+ break;
+ case AHCI_HOST_REG_IRQ_STAT: /* R/WC, RO */
+ s->control_regs.irqstatus &= ~val;
+ ahci_check_irq(s);
+ break;
+ case AHCI_HOST_REG_PORTS_IMPL: /* R/WO, RO */
+ /* FIXME handle R/WO */
+ break;
+ case AHCI_HOST_REG_VERSION: /* RO */
+ /* FIXME report write? */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "Attempted write to unimplemented register: "
+ "AHCI host register %s, "
+ "offset 0x%"PRIx64": 0x%"PRIx64,
+ AHCIHostReg_lookup[regnum], addr, val);
+ trace_ahci_mem_write_host_unimpl(s, size,
+ AHCIHostReg_lookup[regnum], addr);
+ }
+ trace_ahci_mem_write_host(s, size, AHCIHostReg_lookup[regnum],
+ addr, val);
+ } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
+ (addr < (AHCI_PORT_REGS_START_ADDR +
+ (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
+ ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
+ addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
+ "AHCI global register at offset 0x%"PRIx64": 0x%"PRIx64,
+ addr, val);
+ trace_ahci_mem_write_unimpl(s, size, addr, val);
+ }
+}
+
+static const MemoryRegionOps ahci_mem_ops = {
+ .read = ahci_mem_read,
+ .write = ahci_mem_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ AHCIState *s = opaque;
+
+ if (addr == s->idp_offset) {
+ /* index register */
+ return s->idp_index;
+ } else if (addr == s->idp_offset + 4) {
+ /* data register - do memory read at location selected by index */
+ return ahci_mem_read(opaque, s->idp_index, size);
+ } else {
+ return 0;
+ }
+}
+
+static void ahci_idp_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ AHCIState *s = opaque;
+
+ if (addr == s->idp_offset) {
+ /* index register - mask off reserved bits */
+ s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
+ } else if (addr == s->idp_offset + 4) {
+ /* data register - do memory write at location selected by index */
+ ahci_mem_write(opaque, s->idp_index, val, size);
+ }
+}
+
+static const MemoryRegionOps ahci_idp_ops = {
+ .read = ahci_idp_read,
+ .write = ahci_idp_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+
+static void ahci_reg_init(AHCIState *s)
+{
+ int i;
+
+ s->control_regs.cap = (s->ports - 1) |
+ (AHCI_NUM_COMMAND_SLOTS << 8) |
+ (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
+ HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;
+
+ s->control_regs.impl = (1 << s->ports) - 1;
+
+ s->control_regs.version = AHCI_VERSION_1_0;
+
+ for (i = 0; i < s->ports; i++) {
+ s->dev[i].port_state = STATE_RUN;
+ }
+}
+
+static void check_cmd(AHCIState *s, int port)
+{
+ AHCIPortRegs *pr = &s->dev[port].port_regs;
+ uint8_t slot;
+
+ if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
+ for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
+ if ((pr->cmd_issue & (1U << slot)) &&
+ !handle_cmd(s, port, slot)) {
+ pr->cmd_issue &= ~(1U << slot);
+ }
+ }
+ }
+}
+
+static void ahci_check_cmd_bh(void *opaque)
+{
+ AHCIDevice *ad = opaque;
+
+ qemu_bh_delete(ad->check_bh);
+ ad->check_bh = NULL;
+
+ check_cmd(ad->hba, ad->port_no);
+}
+
+static void ahci_init_d2h(AHCIDevice *ad)
+{
+ IDEState *ide_state = &ad->port.ifs[0];
+ AHCIPortRegs *pr = &ad->port_regs;
+
+ if (ad->init_d2h_sent) {
+ return;
+ }
+
+ if (ahci_write_fis_d2h(ad)) {
+ ad->init_d2h_sent = true;
+ /* We're emulating receiving the first Reg H2D Fis from the device;
+ * Update the SIG register, but otherwise proceed as normal. */
+ pr->sig = ((uint32_t)ide_state->hcyl << 24) |
+ (ide_state->lcyl << 16) |
+ (ide_state->sector << 8) |
+ (ide_state->nsector & 0xFF);
+ }
+}
+
+static void ahci_set_signature(AHCIDevice *ad, uint32_t sig)
+{
+ IDEState *s = &ad->port.ifs[0];
+ s->hcyl = sig >> 24 & 0xFF;
+ s->lcyl = sig >> 16 & 0xFF;
+ s->sector = sig >> 8 & 0xFF;
+ s->nsector = sig & 0xFF;
+
+ trace_ahci_set_signature(ad->hba, ad->port_no, s->nsector, s->sector,
+ s->lcyl, s->hcyl, sig);
+}
+
+static void ahci_reset_port(AHCIState *s, int port)
+{
+ AHCIDevice *d = &s->dev[port];
+ AHCIPortRegs *pr = &d->port_regs;
+ IDEState *ide_state = &d->port.ifs[0];
+ int i;
+
+ trace_ahci_reset_port(s, port);
+
+ ide_bus_reset(&d->port);
+ ide_state->ncq_queues = AHCI_MAX_CMDS;
+
+ pr->scr_stat = 0;
+ pr->scr_err = 0;
+ pr->scr_act = 0;
+ pr->tfdata = 0x7F;
+ pr->sig = 0xFFFFFFFF;
+ d->busy_slot = -1;
+ d->init_d2h_sent = false;
+
+ ide_state = &s->dev[port].port.ifs[0];
+ if (!ide_state->blk) {
+ return;
+ }
+
+ /* reset ncq queue */
+ for (i = 0; i < AHCI_MAX_CMDS; i++) {
+ NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
+ ncq_tfs->halt = false;
+ if (!ncq_tfs->used) {
+ continue;
+ }
+
+ if (ncq_tfs->aiocb) {
+ blk_aio_cancel(ncq_tfs->aiocb);
+ ncq_tfs->aiocb = NULL;
+ }
+
+ /* Maybe we just finished the request thanks to blk_aio_cancel() */
+ if (!ncq_tfs->used) {
+ continue;
+ }
+
+ qemu_sglist_destroy(&ncq_tfs->sglist);
+ ncq_tfs->used = 0;
+ }
+
+ s->dev[port].port_state = STATE_RUN;
+ if (ide_state->drive_kind == IDE_CD) {
+        ahci_set_signature(d, SATA_SIGNATURE_CDROM);
+ ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
+ } else {
+ ahci_set_signature(d, SATA_SIGNATURE_DISK);
+ ide_state->status = SEEK_STAT | WRERR_STAT;
+ }
+
+ ide_state->error = 1;
+ ahci_init_d2h(d);
+}
+
+/* Buffer pretty output based on a raw FIS structure. */
+static char *ahci_pretty_buffer_fis(const uint8_t *fis, int cmd_len)
+{
+ int i;
+ GString *s = g_string_new("FIS:");
+
+ for (i = 0; i < cmd_len; i++) {
+ if ((i & 0xf) == 0) {
+ g_string_append_printf(s, "\n0x%02x: ", i);
+ }
+ g_string_append_printf(s, "%02x ", fis[i]);
+ }
+ g_string_append_c(s, '\n');
+
+ return g_string_free(s, FALSE);
+}
+
+static bool ahci_map_fis_address(AHCIDevice *ad)
+{
+ AHCIPortRegs *pr = &ad->port_regs;
+ map_page(ad->hba->as, &ad->res_fis,
+ ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
+ if (ad->res_fis != NULL) {
+ pr->cmd |= PORT_CMD_FIS_ON;
+ return true;
+ }
+
+ pr->cmd &= ~PORT_CMD_FIS_ON;
+ return false;
+}
+
+static void ahci_unmap_fis_address(AHCIDevice *ad)
+{
+ if (ad->res_fis == NULL) {
+ trace_ahci_unmap_fis_address_null(ad->hba, ad->port_no);
+ return;
+ }
+ ad->port_regs.cmd &= ~PORT_CMD_FIS_ON;
+ dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
+ DMA_DIRECTION_FROM_DEVICE, 256);
+ ad->res_fis = NULL;
+}
+
+static bool ahci_map_clb_address(AHCIDevice *ad)
+{
+ AHCIPortRegs *pr = &ad->port_regs;
+ ad->cur_cmd = NULL;
+ map_page(ad->hba->as, &ad->lst,
+ ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
+ if (ad->lst != NULL) {
+ pr->cmd |= PORT_CMD_LIST_ON;
+ return true;
+ }
+
+ pr->cmd &= ~PORT_CMD_LIST_ON;
+ return false;
+}
+
+static void ahci_unmap_clb_address(AHCIDevice *ad)
+{
+ if (ad->lst == NULL) {
+ trace_ahci_unmap_clb_address_null(ad->hba, ad->port_no);
+ return;
+ }
+ ad->port_regs.cmd &= ~PORT_CMD_LIST_ON;
+ dma_memory_unmap(ad->hba->as, ad->lst, 1024,
+ DMA_DIRECTION_FROM_DEVICE, 1024);
+ ad->lst = NULL;
+}
+
+static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
+{
+ AHCIDevice *ad = ncq_tfs->drive;
+ AHCIPortRegs *pr = &ad->port_regs;
+ IDEState *ide_state;
+ SDBFIS *sdb_fis;
+
+ if (!ad->res_fis ||
+ !(pr->cmd & PORT_CMD_FIS_RX)) {
+ return;
+ }
+
+ sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
+ ide_state = &ad->port.ifs[0];
+
+ sdb_fis->type = SATA_FIS_TYPE_SDB;
+ /* Interrupt pending & Notification bit */
+ sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */
+ sdb_fis->status = ide_state->status & 0x77;
+ sdb_fis->error = ide_state->error;
+ /* update SAct field in SDB_FIS */
+ sdb_fis->payload = cpu_to_le32(ad->finished);
+
+ /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
+ pr->tfdata = (ad->port.ifs[0].error << 8) |
+ (ad->port.ifs[0].status & 0x77) |
+ (pr->tfdata & 0x88);
+ pr->scr_act &= ~ad->finished;
+ ad->finished = 0;
+
+ /* Trigger IRQ if interrupt bit is set (which currently, it always is) */
+ if (sdb_fis->flags & 0x40) {
+ ahci_trigger_irq(s, ad, AHCI_PORT_IRQ_BIT_SDBS);
+ }
+}
+
+static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len, bool pio_fis_i)
+{
+ AHCIPortRegs *pr = &ad->port_regs;
+ uint8_t *pio_fis;
+ IDEState *s = &ad->port.ifs[0];
+
+ if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
+ return;
+ }
+
+ pio_fis = &ad->res_fis[RES_FIS_PSFIS];
+
+ pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
+ pio_fis[1] = (pio_fis_i ? (1 << 6) : 0);
+ pio_fis[2] = s->status;
+ pio_fis[3] = s->error;
+
+ pio_fis[4] = s->sector;
+ pio_fis[5] = s->lcyl;
+ pio_fis[6] = s->hcyl;
+ pio_fis[7] = s->select;
+ pio_fis[8] = s->hob_sector;
+ pio_fis[9] = s->hob_lcyl;
+ pio_fis[10] = s->hob_hcyl;
+ pio_fis[11] = 0;
+ pio_fis[12] = s->nsector & 0xFF;
+ pio_fis[13] = (s->nsector >> 8) & 0xFF;
+ pio_fis[14] = 0;
+ pio_fis[15] = s->status;
+ pio_fis[16] = len & 255;
+ pio_fis[17] = len >> 8;
+ pio_fis[18] = 0;
+ pio_fis[19] = 0;
+
+ /* Update shadow registers: */
+ pr->tfdata = (ad->port.ifs[0].error << 8) |
+ ad->port.ifs[0].status;
+
+ if (pio_fis[2] & ERR_STAT) {
+ ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
+ }
+}
+
+static bool ahci_write_fis_d2h(AHCIDevice *ad)
+{
+ AHCIPortRegs *pr = &ad->port_regs;
+ uint8_t *d2h_fis;
+ int i;
+ IDEState *s = &ad->port.ifs[0];
+
+ if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
+ return false;
+ }
+
+ d2h_fis = &ad->res_fis[RES_FIS_RFIS];
+
+ d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
+ d2h_fis[1] = (1 << 6); /* interrupt bit */
+ d2h_fis[2] = s->status;
+ d2h_fis[3] = s->error;
+
+ d2h_fis[4] = s->sector;
+ d2h_fis[5] = s->lcyl;
+ d2h_fis[6] = s->hcyl;
+ d2h_fis[7] = s->select;
+ d2h_fis[8] = s->hob_sector;
+ d2h_fis[9] = s->hob_lcyl;
+ d2h_fis[10] = s->hob_hcyl;
+ d2h_fis[11] = 0;
+ d2h_fis[12] = s->nsector & 0xFF;
+ d2h_fis[13] = (s->nsector >> 8) & 0xFF;
+ for (i = 14; i < 20; i++) {
+ d2h_fis[i] = 0;
+ }
+
+ /* Update shadow registers: */
+ pr->tfdata = (ad->port.ifs[0].error << 8) |
+ ad->port.ifs[0].status;
+
+ if (d2h_fis[2] & ERR_STAT) {
+ ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
+ }
+
+ ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_DHRS);
+ return true;
+}
+
+static int prdt_tbl_entry_size(const AHCI_SG *tbl)
+{
+ /* flags_size is zero-based */
+ return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
+}
+
+/**
+ * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
+ * @ad: The AHCIDevice for whom we are building the SGList.
+ * @sglist: The SGList target to add PRD entries to.
+ * @cmd: The AHCI Command Header that describes where the PRDT is.
+ * @limit: The remaining size of the S/ATA transaction, in bytes.
+ * @offset: The number of bytes already transferred, in bytes.
+ *
+ * The AHCI PRDT can describe up to 256GiB. S/ATA only support transactions of
+ * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop
+ * building the sglist from the PRDT as soon as we hit @limit bytes,
+ * which is <= INT32_MAX/2GiB.
+ */
+static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
+ AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
+{
+ uint16_t opts = le16_to_cpu(cmd->opts);
+ uint16_t prdtl = le16_to_cpu(cmd->prdtl);
+ uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
+ uint64_t prdt_addr = cfis_addr + 0x80;
+ dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
+ dma_addr_t real_prdt_len = prdt_len;
+ uint8_t *prdt;
+ int i;
+ int r = 0;
+ uint64_t sum = 0;
+ int off_idx = -1;
+ int64_t off_pos = -1;
+ int tbl_entry_size;
+ IDEBus *bus = &ad->port;
+ BusState *qbus = BUS(bus);
+
+ trace_ahci_populate_sglist(ad->hba, ad->port_no);
+
+ if (!prdtl) {
+ trace_ahci_populate_sglist_no_prdtl(ad->hba, ad->port_no, opts);
+ return -1;
+ }
+
+ /* map PRDT */
+ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
+ DMA_DIRECTION_TO_DEVICE))){
+ trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
+ return -1;
+ }
+
+ if (prdt_len < real_prdt_len) {
+ trace_ahci_populate_sglist_short_map(ad->hba, ad->port_no);
+ r = -1;
+ goto out;
+ }
+
+ /* Get entries in the PRDT, init a qemu sglist accordingly */
+ if (prdtl > 0) {
+ AHCI_SG *tbl = (AHCI_SG *)prdt;
+ sum = 0;
+ for (i = 0; i < prdtl; i++) {
+ tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
+ if (offset < (sum + tbl_entry_size)) {
+ off_idx = i;
+ off_pos = offset - sum;
+ break;
+ }
+ sum += tbl_entry_size;
+ }
+ if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
+ trace_ahci_populate_sglist_bad_offset(ad->hba, ad->port_no,
+ off_idx, off_pos);
+ r = -1;
+ goto out;
+ }
+
+ qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
+ ad->hba->as);
+ qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
+ MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,
+ limit));
+
+ for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
+ qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
+ MIN(prdt_tbl_entry_size(&tbl[i]),
+ limit - sglist->size));
+ }
+ }
+
+out:
+ dma_memory_unmap(ad->hba->as, prdt, prdt_len,
+ DMA_DIRECTION_TO_DEVICE, prdt_len);
+ return r;
+}
+
+static void ncq_err(NCQTransferState *ncq_tfs)
+{
+ IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
+
+ ide_state->error = ABRT_ERR;
+ ide_state->status = READY_STAT | ERR_STAT;
+ ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
+ qemu_sglist_destroy(&ncq_tfs->sglist);
+ ncq_tfs->used = 0;
+}
+
+static void ncq_finish(NCQTransferState *ncq_tfs)
+{
+ /* If we didn't error out, set our finished bit. Errored commands
+ * do not get a bit set for the SDB FIS ACT register, nor do they
+ * clear the outstanding bit in scr_act (PxSACT). */
+ if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) {
+ ncq_tfs->drive->finished |= (1 << ncq_tfs->tag);
+ }
+
+ ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs);
+
+ trace_ncq_finish(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
+ ncq_tfs->tag);
+
+ block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
+ &ncq_tfs->acct);
+ qemu_sglist_destroy(&ncq_tfs->sglist);
+ ncq_tfs->used = 0;
+}
+
+static void ncq_cb(void *opaque, int ret)
+{
+ NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
+ IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
+
+ ncq_tfs->aiocb = NULL;
+
+ if (ret < 0) {
+ bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED;
+ BlockErrorAction action = blk_get_error_action(ide_state->blk,
+ is_read, -ret);
+ if (action == BLOCK_ERROR_ACTION_STOP) {
+ ncq_tfs->halt = true;
+ ide_state->bus->error_status = IDE_RETRY_HBA;
+ } else if (action == BLOCK_ERROR_ACTION_REPORT) {
+ ncq_err(ncq_tfs);
+ }
+ blk_error_action(ide_state->blk, action, is_read, -ret);
+ } else {
+ ide_state->status = READY_STAT | SEEK_STAT;
+ }
+
+ if (!ncq_tfs->halt) {
+ ncq_finish(ncq_tfs);
+ }
+}
+
+static int is_ncq(uint8_t ata_cmd)
+{
+ /* Based on SATA 3.2 section 13.6.3.2 */
+ switch (ata_cmd) {
+ case READ_FPDMA_QUEUED:
+ case WRITE_FPDMA_QUEUED:
+ case NCQ_NON_DATA:
+ case RECEIVE_FPDMA_QUEUED:
+ case SEND_FPDMA_QUEUED:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static void execute_ncq_command(NCQTransferState *ncq_tfs)
+{
+ AHCIDevice *ad = ncq_tfs->drive;
+ IDEState *ide_state = &ad->port.ifs[0];
+ int port = ad->port_no;
+
+ g_assert(is_ncq(ncq_tfs->cmd));
+ ncq_tfs->halt = false;
+
+ switch (ncq_tfs->cmd) {
+ case READ_FPDMA_QUEUED:
+ trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag,
+ ncq_tfs->sector_count, ncq_tfs->lba);
+ dma_acct_start(ide_state->blk, &ncq_tfs->acct,
+ &ncq_tfs->sglist, BLOCK_ACCT_READ);
+ ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
+ ncq_tfs->lba << BDRV_SECTOR_BITS,
+ BDRV_SECTOR_SIZE,
+ ncq_cb, ncq_tfs);
+ break;
+ case WRITE_FPDMA_QUEUED:
+        trace_execute_ncq_command_write(ad->hba, port, ncq_tfs->tag,
+ ncq_tfs->sector_count, ncq_tfs->lba);
+ dma_acct_start(ide_state->blk, &ncq_tfs->acct,
+ &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
+ ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
+ ncq_tfs->lba << BDRV_SECTOR_BITS,
+ BDRV_SECTOR_SIZE,
+ ncq_cb, ncq_tfs);
+ break;
+ default:
+ trace_execute_ncq_command_unsup(ad->hba, port,
+ ncq_tfs->tag, ncq_tfs->cmd);
+ ncq_err(ncq_tfs);
+ }
+}
+
+
+static void process_ncq_command(AHCIState *s, int port, const uint8_t *cmd_fis,
+ uint8_t slot)
+{
+ AHCIDevice *ad = &s->dev[port];
+ const NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
+ uint8_t tag = ncq_fis->tag >> 3;
+ NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];
+ size_t size;
+
+ g_assert(is_ncq(ncq_fis->command));
+ if (ncq_tfs->used) {
+ /* error - already in use */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: tag %d already used\n",
+ __func__, tag);
+ return;
+ }
+
+ ncq_tfs->used = 1;
+ ncq_tfs->drive = ad;
+ ncq_tfs->slot = slot;
+ ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot];
+ ncq_tfs->cmd = ncq_fis->command;
+ ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
+ ((uint64_t)ncq_fis->lba4 << 32) |
+ ((uint64_t)ncq_fis->lba3 << 24) |
+ ((uint64_t)ncq_fis->lba2 << 16) |
+ ((uint64_t)ncq_fis->lba1 << 8) |
+ (uint64_t)ncq_fis->lba0;
+ ncq_tfs->tag = tag;
+
+ /* Sanity-check the NCQ packet */
+ if (tag != slot) {
+ trace_process_ncq_command_mismatch(s, port, tag, slot);
+ }
+
+ if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
+ trace_process_ncq_command_aux(s, port, tag);
+ }
+ if (ncq_fis->prio || ncq_fis->icc) {
+ trace_process_ncq_command_prioicc(s, port, tag);
+ }
+ if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
+ trace_process_ncq_command_fua(s, port, tag);
+ }
+ if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
+ trace_process_ncq_command_rarc(s, port, tag);
+ }
+
+ ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) |
+ ncq_fis->sector_count_low);
+ if (!ncq_tfs->sector_count) {
+ ncq_tfs->sector_count = 0x10000;
+ }
+ size = ncq_tfs->sector_count * BDRV_SECTOR_SIZE;
+ ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);
+
+ if (ncq_tfs->sglist.size < size) {
+ error_report("ahci: PRDT length for NCQ command (0x%zx) "
+ "is smaller than the requested size (0x%zx)",
+ ncq_tfs->sglist.size, size);
+ ncq_err(ncq_tfs);
+ ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_OFS);
+ return;
+ } else if (ncq_tfs->sglist.size != size) {
+ trace_process_ncq_command_large(s, port, tag,
+ ncq_tfs->sglist.size, size);
+ }
+
+ trace_process_ncq_command(s, port, tag,
+ ncq_fis->command,
+ ncq_tfs->lba,
+ ncq_tfs->lba + ncq_tfs->sector_count - 1);
+ execute_ncq_command(ncq_tfs);
+}
+
+static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot)
+{
+ if (port >= s->ports || slot >= AHCI_MAX_CMDS) {
+ return NULL;
+ }
+
+ return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL;
+}
+
+static void handle_reg_h2d_fis(AHCIState *s, int port,
+ uint8_t slot, const uint8_t *cmd_fis)
+{
+ IDEState *ide_state = &s->dev[port].port.ifs[0];
+ AHCICmdHdr *cmd = get_cmd_header(s, port, slot);
+ uint16_t opts = le16_to_cpu(cmd->opts);
+
+ if (cmd_fis[1] & 0x0F) {
+ trace_handle_reg_h2d_fis_pmp(s, port, cmd_fis[1],
+ cmd_fis[2], cmd_fis[3]);
+ return;
+ }
+
+ if (cmd_fis[1] & 0x70) {
+ trace_handle_reg_h2d_fis_res(s, port, cmd_fis[1],
+ cmd_fis[2], cmd_fis[3]);
+ return;
+ }
+
+ if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
+ switch (s->dev[port].port_state) {
+ case STATE_RUN:
+ if (cmd_fis[15] & ATA_SRST) {
+ s->dev[port].port_state = STATE_RESET;
+ }
+ break;
+ case STATE_RESET:
+ if (!(cmd_fis[15] & ATA_SRST)) {
+ ahci_reset_port(s, port);
+ }
+ break;
+ }
+ return;
+ }
+
+ /* Check for NCQ command */
+ if (is_ncq(cmd_fis[2])) {
+ process_ncq_command(s, port, cmd_fis, slot);
+ return;
+ }
+
+ /* Decompose the FIS:
+ * AHCI does not interpret FIS packets, it only forwards them.
+ * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
+ * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets.
+ *
+ * ATA4 describes sector number for LBA28/CHS commands.
+ * ATA6 describes sector number for LBA48 commands.
+ * ATA8 deprecates CHS fully, describing only LBA28/48.
+ *
+ * We dutifully convert the FIS into IDE registers, and allow the
+ * core layer to interpret them as needed. */
+ ide_state->feature = cmd_fis[3];
+ ide_state->sector = cmd_fis[4]; /* LBA 7:0 */
+ ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */
+ ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */
+ ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */
+ ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */
+ ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */
+ ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */
+ ide_state->hob_feature = cmd_fis[11];
+ ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
+ /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
+ /* 15: Only valid when UPDATE_COMMAND not set. */
+
+ /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
+ * table to ide_state->io_buffer */
+ if (opts & AHCI_CMD_ATAPI) {
+ memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
+ if (trace_event_get_state_backends(TRACE_HANDLE_REG_H2D_FIS_DUMP)) {
+ char *pretty_fis = ahci_pretty_buffer_fis(ide_state->io_buffer, 0x10);
+ trace_handle_reg_h2d_fis_dump(s, port, pretty_fis);
+ g_free(pretty_fis);
+ }
+ }
+
+ ide_state->error = 0;
+ s->dev[port].done_first_drq = false;
+ /* Reset transferred byte counter */
+ cmd->status = 0;
+
+ /* We're ready to process the command in FIS byte 2. */
+ ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
+}
+
+static int handle_cmd(AHCIState *s, int port, uint8_t slot)
+{
+ IDEState *ide_state;
+ uint64_t tbl_addr;
+ AHCICmdHdr *cmd;
+ uint8_t *cmd_fis;
+ dma_addr_t cmd_len;
+
+ if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
+ /* Engine currently busy, try again later */
+ trace_handle_cmd_busy(s, port);
+ return -1;
+ }
+
+ if (!s->dev[port].lst) {
+ trace_handle_cmd_nolist(s, port);
+ return -1;
+ }
+ cmd = get_cmd_header(s, port, slot);
+ /* remember current slot handle for later */
+ s->dev[port].cur_cmd = cmd;
+
+ /* The device we are working for */
+ ide_state = &s->dev[port].port.ifs[0];
+ if (!ide_state->blk) {
+ trace_handle_cmd_badport(s, port);
+ return -1;
+ }
+
+ tbl_addr = le64_to_cpu(cmd->tbl_addr);
+ cmd_len = 0x80;
+ cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
+ DMA_DIRECTION_TO_DEVICE);
+ if (!cmd_fis) {
+ trace_handle_cmd_badfis(s, port);
+ return -1;
+ } else if (cmd_len != 0x80) {
+ ahci_trigger_irq(s, &s->dev[port], AHCI_PORT_IRQ_BIT_HBFS);
+ trace_handle_cmd_badmap(s, port, cmd_len);
+ goto out;
+ }
+ if (trace_event_get_state_backends(TRACE_HANDLE_CMD_FIS_DUMP)) {
+ char *pretty_fis = ahci_pretty_buffer_fis(cmd_fis, 0x80);
+ trace_handle_cmd_fis_dump(s, port, pretty_fis);
+ g_free(pretty_fis);
+ }
+ switch (cmd_fis[0]) {
+ case SATA_FIS_TYPE_REGISTER_H2D:
+ handle_reg_h2d_fis(s, port, slot, cmd_fis);
+ break;
+ default:
+ trace_handle_cmd_unhandled_fis(s, port,
+ cmd_fis[0], cmd_fis[1], cmd_fis[2]);
+ break;
+ }
+
+out:
+ dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE,
+ cmd_len);
+
+ if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
+ /* async command, complete later */
+ s->dev[port].busy_slot = slot;
+ return -1;
+ }
+
+ /* done handling the command */
+ return 0;
+}
+
+/* Transfer PIO data between RAM and device */
+static void ahci_pio_transfer(const IDEDMA *dma)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+ IDEState *s = &ad->port.ifs[0];
+ uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
+ /* write == ram -> device */
+ uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
+ int is_write = opts & AHCI_CMD_WRITE;
+ int is_atapi = opts & AHCI_CMD_ATAPI;
+ int has_sglist = 0;
+ bool pio_fis_i;
+
+ /* The PIO Setup FIS is received prior to transfer, but the interrupt
+ * is only triggered after data is received.
+ *
+ * The device only sets the 'I' bit in the PIO Setup FIS for device->host
+ * requests (see "DPIOI1" in the SATA spec), or for host->device DRQs after
+ * the first (see "DPIOO1"). The latter is consistent with the spec's
+ * description of the PACKET protocol, where the command part of ATAPI requests
+ * ("DPKT0") has the 'I' bit clear, while the data part of PIO ATAPI requests
+ * ("DPKT4a" and "DPKT7") has the 'I' bit set for both directions for all DRQs.
+ */
+ pio_fis_i = ad->done_first_drq || (!is_atapi && !is_write);
+ ahci_write_fis_pio(ad, size, pio_fis_i);
+
+ if (is_atapi && !ad->done_first_drq) {
+ /* already prepopulated iobuffer */
+ goto out;
+ }
+
+ if (ahci_dma_prepare_buf(dma, size)) {
+ has_sglist = 1;
+ }
+
+ trace_ahci_pio_transfer(ad->hba, ad->port_no, is_write ? "writ" : "read",
+ size, is_atapi ? "atapi" : "ata",
+ has_sglist ? "" : "o");
+
+ if (has_sglist && size) {
+ if (is_write) {
+ dma_buf_write(s->data_ptr, size, &s->sg);
+ } else {
+ dma_buf_read(s->data_ptr, size, &s->sg);
+ }
+ }
+
+ /* Update number of transferred bytes, destroy sglist */
+ dma_buf_commit(s, size);
+
+out:
+ /* declare that we processed everything */
+ s->data_ptr = s->data_end;
+
+ ad->done_first_drq = true;
+ if (pio_fis_i) {
+ ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_PSS);
+ }
+}
+
+static void ahci_start_dma(const IDEDMA *dma, IDEState *s,
+ BlockCompletionFunc *dma_cb)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+ trace_ahci_start_dma(ad->hba, ad->port_no);
+ s->io_buffer_offset = 0;
+ dma_cb(s, 0);
+}
+
+static void ahci_restart_dma(const IDEDMA *dma)
+{
+ /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
+}
+
+/**
+ * IDE/PIO restarts are handled by the core layer, but NCQ commands
+ * need an extra kick from the AHCI HBA.
+ */
+static void ahci_restart(const IDEDMA *dma)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+ int i;
+
+ for (i = 0; i < AHCI_MAX_CMDS; i++) {
+ NCQTransferState *ncq_tfs = &ad->ncq_tfs[i];
+ if (ncq_tfs->halt) {
+ execute_ncq_command(ncq_tfs);
+ }
+ }
+}
+
+/**
+ * Called in DMA and PIO R/W chains to read the PRDT.
+ * Not shared with NCQ pathways.
+ */
+static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+ IDEState *s = &ad->port.ifs[0];
+
+ if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd,
+ limit, s->io_buffer_offset) == -1) {
+ trace_ahci_dma_prepare_buf_fail(ad->hba, ad->port_no);
+ return -1;
+ }
+ s->io_buffer_size = s->sg.size;
+
+ trace_ahci_dma_prepare_buf(ad->hba, ad->port_no, limit, s->io_buffer_size);
+ return s->io_buffer_size;
+}
+
+/**
+ * Updates the command header with a bytes-read value.
+ * Called via dma_buf_commit, for both DMA and PIO paths.
+ * sglist destruction is handled within dma_buf_commit.
+ */
+static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+
+ tx_bytes += le32_to_cpu(ad->cur_cmd->status);
+ ad->cur_cmd->status = cpu_to_le32(tx_bytes);
+}
+
+static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+ IDEState *s = &ad->port.ifs[0];
+ uint8_t *p = s->io_buffer + s->io_buffer_index;
+ int l = s->io_buffer_size - s->io_buffer_index;
+
+ if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) {
+ return 0;
+ }
+
+ if (is_write) {
+ dma_buf_read(p, l, &s->sg);
+ } else {
+ dma_buf_write(p, l, &s->sg);
+ }
+
+ /* free sglist, update byte count */
+ dma_buf_commit(s, l);
+ s->io_buffer_index += l;
+
+ trace_ahci_dma_rw_buf(ad->hba, ad->port_no, l);
+ return 1;
+}
+
+static void ahci_cmd_done(const IDEDMA *dma)
+{
+ AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
+
+ trace_ahci_cmd_done(ad->hba, ad->port_no);
+
+ /* no longer busy */
+ if (ad->busy_slot != -1) {
+ ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);
+ ad->busy_slot = -1;
+ }
+
+ /* update d2h status */
+ ahci_write_fis_d2h(ad);
+
+ if (ad->port_regs.cmd_issue && !ad->check_bh) {
+ ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
+ qemu_bh_schedule(ad->check_bh);
+ }
+}
+
+static void ahci_irq_set(void *opaque, int n, int level)
+{
+ qemu_log_mask(LOG_UNIMP, "ahci: IRQ#%d level:%d\n", n, level);
+}
+
+static const IDEDMAOps ahci_dma_ops = {
+ .start_dma = ahci_start_dma,
+ .restart = ahci_restart,
+ .restart_dma = ahci_restart_dma,
+ .pio_transfer = ahci_pio_transfer,
+ .prepare_buf = ahci_dma_prepare_buf,
+ .commit_buf = ahci_commit_buf,
+ .rw_buf = ahci_dma_rw_buf,
+ .cmd_done = ahci_cmd_done,
+};
+
+void ahci_init(AHCIState *s, DeviceState *qdev)
+{
+ s->container = qdev;
+ /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
+ memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
+ "ahci", AHCI_MEM_BAR_SIZE);
+ memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
+ "ahci-idp", 32);
+}
+
+void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
+{
+ qemu_irq *irqs;
+ int i;
+
+ s->as = as;
+ s->ports = ports;
+ s->dev = g_new0(AHCIDevice, ports);
+ ahci_reg_init(s);
+ irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
+ for (i = 0; i < s->ports; i++) {
+ AHCIDevice *ad = &s->dev[i];
+
+ ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1);
+ ide_init2(&ad->port, irqs[i]);
+
+ ad->hba = s;
+ ad->port_no = i;
+ ad->port.dma = &ad->dma;
+ ad->port.dma->ops = &ahci_dma_ops;
+ ide_register_restart_cb(&ad->port);
+ }
+ g_free(irqs);
+}
+
+void ahci_uninit(AHCIState *s)
+{
+ int i, j;
+
+ for (i = 0; i < s->ports; i++) {
+ AHCIDevice *ad = &s->dev[i];
+
+ for (j = 0; j < 2; j++) {
+ IDEState *s = &ad->port.ifs[j];
+
+ ide_exit(s);
+ }
+ object_unparent(OBJECT(&ad->port));
+ }
+
+ g_free(s->dev);
+}
+
+void ahci_reset(AHCIState *s)
+{
+ AHCIPortRegs *pr;
+ int i;
+
+ trace_ahci_reset(s);
+
+ s->control_regs.irqstatus = 0;
+ /* AHCI Enable (AE)
+ * The implementation of this bit is dependent upon the value of the
+ * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
+ * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
+ * read-only and shall have a reset value of '1'.
+ *
+ * We set HOST_CAP_AHCI so we must enable AHCI at reset.
+ */
+ s->control_regs.ghc = HOST_CTL_AHCI_EN;
+
+ for (i = 0; i < s->ports; i++) {
+ pr = &s->dev[i].port_regs;
+ pr->irq_stat = 0;
+ pr->irq_mask = 0;
+ pr->scr_ctl = 0;
+ pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
+ ahci_reset_port(s, i);
+ }
+}
+
+static const VMStateDescription vmstate_ncq_tfs = {
+ .name = "ncq state",
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(sector_count, NCQTransferState),
+ VMSTATE_UINT64(lba, NCQTransferState),
+ VMSTATE_UINT8(tag, NCQTransferState),
+ VMSTATE_UINT8(cmd, NCQTransferState),
+ VMSTATE_UINT8(slot, NCQTransferState),
+ VMSTATE_BOOL(used, NCQTransferState),
+ VMSTATE_BOOL(halt, NCQTransferState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_ahci_device = {
+ .name = "ahci port",
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_IDE_BUS(port, AHCIDevice),
+ VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
+ VMSTATE_UINT32(port_state, AHCIDevice),
+ VMSTATE_UINT32(finished, AHCIDevice),
+ VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
+ VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
+ VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
+ VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
+ VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
+ VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
+ VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
+ VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
+ VMSTATE_UINT32(port_regs.sig, AHCIDevice),
+ VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
+ VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
+ VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
+ VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
+ VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
+ VMSTATE_BOOL(done_first_drq, AHCIDevice),
+ VMSTATE_INT32(busy_slot, AHCIDevice),
+ VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
+ VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS,
+ 1, vmstate_ncq_tfs, NCQTransferState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static int ahci_state_post_load(void *opaque, int version_id)
+{
+ int i, j;
+ struct AHCIDevice *ad;
+ NCQTransferState *ncq_tfs;
+ AHCIPortRegs *pr;
+ AHCIState *s = opaque;
+
+ for (i = 0; i < s->ports; i++) {
+ ad = &s->dev[i];
+ pr = &ad->port_regs;
+
+ if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) {
+ error_report("AHCI: DMA engine should be off, but status bit "
+ "indicates it is still running.");
+ return -1;
+ }
+ if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) {
+ error_report("AHCI: FIS RX engine should be off, but status bit "
+ "indicates it is still running.");
+ return -1;
+ }
+
+ /* After a migrate, the DMA/FIS engines are "off" and
+ * need to be conditionally restarted */
+ pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);
+ if (ahci_cond_start_engines(ad) != 0) {
+ return -1;
+ }
+
+ for (j = 0; j < AHCI_MAX_CMDS; j++) {
+ ncq_tfs = &ad->ncq_tfs[j];
+ ncq_tfs->drive = ad;
+
+ if (ncq_tfs->used != ncq_tfs->halt) {
+ return -1;
+ }
+ if (!ncq_tfs->halt) {
+ continue;
+ }
+ if (!is_ncq(ncq_tfs->cmd)) {
+ return -1;
+ }
+ if (ncq_tfs->slot != ncq_tfs->tag) {
+ return -1;
+ }
+ /* If ncq_tfs->halt is justly set, the engine should be engaged,
+ * and the command list buffer should be mapped. */
+ ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot);
+ if (!ncq_tfs->cmdh) {
+ return -1;
+ }
+ ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
+ ncq_tfs->cmdh,
+ ncq_tfs->sector_count * BDRV_SECTOR_SIZE,
+ 0);
+ if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {
+ return -1;
+ }
+ }
+
+
+ /*
+ * If an error is present, ad->busy_slot will be valid and not -1.
+ * In this case, an operation is waiting to resume and will re-check
+ * for additional AHCI commands to execute upon completion.
+ *
+ * In the case where no error was present, busy_slot will be -1,
+ * and we should check to see if there are additional commands waiting.
+ */
+ if (ad->busy_slot == -1) {
+ check_cmd(s, i);
+ } else {
+ /* We are in the middle of a command, and may need to access
+ * the command header in guest memory again. */
+ if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
+ return -1;
+ }
+ ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot);
+ }
+ }
+
+ return 0;
+}
+
+const VMStateDescription vmstate_ahci = {
+ .name = "ahci",
+ .version_id = 1,
+ .post_load = ahci_state_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
+ vmstate_ahci_device, AHCIDevice),
+ VMSTATE_UINT32(control_regs.cap, AHCIState),
+ VMSTATE_UINT32(control_regs.ghc, AHCIState),
+ VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
+ VMSTATE_UINT32(control_regs.impl, AHCIState),
+ VMSTATE_UINT32(control_regs.version, AHCIState),
+ VMSTATE_UINT32(idp_index, AHCIState),
+ VMSTATE_INT32_EQUAL(ports, AHCIState, NULL),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_sysbus_ahci = {
+ .name = "sysbus-ahci",
+ .fields = (VMStateField[]) {
+ VMSTATE_AHCI(ahci, SysbusAHCIState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void sysbus_ahci_reset(DeviceState *dev)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(dev);
+
+ ahci_reset(&s->ahci);
+}
+
+static void sysbus_ahci_init(Object *obj)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ ahci_init(&s->ahci, DEVICE(obj));
+
+ sysbus_init_mmio(sbd, &s->ahci.mem);
+ sysbus_init_irq(sbd, &s->ahci.irq);
+}
+
+static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
+{
+ SysbusAHCIState *s = SYSBUS_AHCI(dev);
+
+ ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports);
+}
+
+static Property sysbus_ahci_properties[] = {
+ DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sysbus_ahci_realize;
+ dc->vmsd = &vmstate_sysbus_ahci;
+ device_class_set_props(dc, sysbus_ahci_properties);
+ dc->reset = sysbus_ahci_reset;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo sysbus_ahci_info = {
+ .name = TYPE_SYSBUS_AHCI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysbusAHCIState),
+ .instance_init = sysbus_ahci_init,
+ .class_init = sysbus_ahci_class_init,
+};
+
+static void sysbus_ahci_register_types(void)
+{
+ type_register_static(&sysbus_ahci_info);
+}
+
+type_init(sysbus_ahci_register_types)
+
+int32_t ahci_get_num_ports(PCIDevice *dev)
+{
+ AHCIPCIState *d = ICH9_AHCI(dev);
+ AHCIState *ahci = &d->ahci;
+
+ return ahci->ports;
+}
+
+void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
+{
+ AHCIPCIState *d = ICH9_AHCI(dev);
+ AHCIState *ahci = &d->ahci;
+ int i;
+
+ for (i = 0; i < ahci->ports; i++) {
+ if (hd[i] == NULL) {
+ continue;
+ }
+ ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
+ }
+
+}
diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci_internal.h
new file mode 100644
index 000000000..109de9e2d
--- /dev/null
+++ b/hw/ide/ahci_internal.h
@@ -0,0 +1,393 @@
+/*
+ * QEMU AHCI Emulation
+ *
+ * Copyright (c) 2010 qiaochong@loongson.cn
+ * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
+ * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
+ * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef HW_IDE_AHCI_INTERNAL_H
+#define HW_IDE_AHCI_INTERNAL_H
+
+#include "hw/ide/ahci.h"
+#include "hw/ide/internal.h"
+#include "hw/pci/pci.h"
+
+#define AHCI_MEM_BAR_SIZE 0x1000
+#define AHCI_MAX_PORTS 32
+#define AHCI_MAX_SG 168 /* hardware max is 64K */
+#define AHCI_DMA_BOUNDARY 0xffffffff
+#define AHCI_USE_CLUSTERING 0
+#define AHCI_MAX_CMDS 32
+#define AHCI_CMD_SZ 32
+#define AHCI_CMD_SLOT_SZ (AHCI_MAX_CMDS * AHCI_CMD_SZ)
+#define AHCI_RX_FIS_SZ 256
+#define AHCI_CMD_TBL_CDB 0x40
+#define AHCI_CMD_TBL_HDR_SZ 0x80
+#define AHCI_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16))
+#define AHCI_CMD_TBL_AR_SZ (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS)
+#define AHCI_PORT_PRIV_DMA_SZ (AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + \
+ AHCI_RX_FIS_SZ)
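+
+/*
+ * Editorial note, not part of the original header: with the constants above,
+ * the per-port DMA area described here works out to 32 * 32 = 1024 bytes of
+ * command list, 32 * (0x80 + 168 * 16) = 90112 bytes of command tables and
+ * 256 bytes of received-FIS area, i.e. AHCI_PORT_PRIV_DMA_SZ = 91392 bytes.
+ */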
+
+#define AHCI_IRQ_ON_SG (1U << 31)
+#define AHCI_CMD_ATAPI (1 << 5)
+#define AHCI_CMD_WRITE (1 << 6)
+#define AHCI_CMD_PREFETCH (1 << 7)
+#define AHCI_CMD_RESET (1 << 8)
+#define AHCI_CMD_CLR_BUSY (1 << 10)
+
+#define RX_FIS_D2H_REG 0x40 /* offset of D2H Register FIS data */
+#define RX_FIS_SDB 0x58 /* offset of SDB FIS data */
+#define RX_FIS_UNK 0x60 /* offset of Unknown FIS data */
+
+/* global controller registers */
+enum AHCIHostReg {
+ AHCI_HOST_REG_CAP = 0, /* CAP: host capabilities */
+ AHCI_HOST_REG_CTL = 1, /* GHC: global host control */
+ AHCI_HOST_REG_IRQ_STAT = 2, /* IS: interrupt status */
+ AHCI_HOST_REG_PORTS_IMPL = 3, /* PI: bitmap of implemented ports */
+ AHCI_HOST_REG_VERSION = 4, /* VS: AHCI spec. version compliancy */
+ AHCI_HOST_REG_CCC_CTL = 5, /* CCC_CTL: CCC Control */
+ AHCI_HOST_REG_CCC_PORTS = 6, /* CCC_PORTS: CCC Ports */
+ AHCI_HOST_REG_EM_LOC = 7, /* EM_LOC: Enclosure Mgmt Location */
+ AHCI_HOST_REG_EM_CTL = 8, /* EM_CTL: Enclosure Mgmt Control */
+ AHCI_HOST_REG_CAP2 = 9, /* CAP2: host capabilities, extended */
+ AHCI_HOST_REG_BOHC = 10, /* BOHC: firmware/os handoff ctrl & status */
+ AHCI_HOST_REG__COUNT = 11
+};
+
+/* HOST_CTL bits */
+#define HOST_CTL_RESET (1 << 0) /* reset controller; self-clear */
+#define HOST_CTL_IRQ_EN (1 << 1) /* global IRQ enable */
+#define HOST_CTL_AHCI_EN (1U << 31) /* AHCI enabled */
+
+/* HOST_CAP bits */
+#define HOST_CAP_SSC (1 << 14) /* Slumber capable */
+#define HOST_CAP_AHCI (1 << 18) /* AHCI only */
+#define HOST_CAP_CLO (1 << 24) /* Command List Override support */
+#define HOST_CAP_SSS (1 << 27) /* Staggered Spin-up */
+#define HOST_CAP_NCQ (1 << 30) /* Native Command Queueing */
+#define HOST_CAP_64 (1U << 31) /* PCI DAC (64-bit DMA) support */
+
+/* registers for each SATA port */
+enum AHCIPortReg {
+ AHCI_PORT_REG_LST_ADDR = 0, /* PxCLB: command list DMA addr */
+ AHCI_PORT_REG_LST_ADDR_HI = 1, /* PxCLBU: command list DMA addr hi */
+ AHCI_PORT_REG_FIS_ADDR = 2, /* PxFB: FIS rx buf addr */
+ AHCI_PORT_REG_FIS_ADDR_HI = 3, /* PxFBU: FIS rx buf addr hi */
+ AHCI_PORT_REG_IRQ_STAT = 4, /* PxIS: interrupt status */
+ AHCI_PORT_REG_IRQ_MASK = 5, /* PxIE: interrupt enable/disable mask */
+ AHCI_PORT_REG_CMD = 6, /* PxCMD: port command */
+ /* RESERVED */
+ AHCI_PORT_REG_TFDATA = 8, /* PxTFD: taskfile data */
+ AHCI_PORT_REG_SIG = 9, /* PxSIG: device TF signature */
+ AHCI_PORT_REG_SCR_STAT = 10, /* PxSSTS: SATA phy register: SStatus */
+ AHCI_PORT_REG_SCR_CTL = 11, /* PxSCTL: SATA phy register: SControl */
+ AHCI_PORT_REG_SCR_ERR = 12, /* PxSERR: SATA phy register: SError */
+ AHCI_PORT_REG_SCR_ACT = 13, /* PxSACT: SATA phy register: SActive */
+ AHCI_PORT_REG_CMD_ISSUE = 14, /* PxCI: command issue */
+ AHCI_PORT_REG_SCR_NOTIF = 15, /* PxSNTF: SATA phy register: SNotification */
+ AHCI_PORT_REG_FIS_CTL = 16, /* PxFBS: Port multiplier switching ctl */
+ AHCI_PORT_REG_DEV_SLEEP = 17, /* PxDEVSLP: device sleep control */
+ /* RESERVED */
+ AHCI_PORT_REG_VENDOR_1 = 28, /* PxVS: Vendor Specific */
+ AHCI_PORT_REG_VENDOR_2 = 29,
+ AHCI_PORT_REG_VENDOR_3 = 30,
+ AHCI_PORT_REG_VENDOR_4 = 31,
+ AHCI_PORT_REG__COUNT = 32
+};
+
+/* Port interrupt bit descriptors */
+enum AHCIPortIRQ {
+ AHCI_PORT_IRQ_BIT_DHRS = 0,
+ AHCI_PORT_IRQ_BIT_PSS = 1,
+ AHCI_PORT_IRQ_BIT_DSS = 2,
+ AHCI_PORT_IRQ_BIT_SDBS = 3,
+ AHCI_PORT_IRQ_BIT_UFS = 4,
+ AHCI_PORT_IRQ_BIT_DPS = 5,
+ AHCI_PORT_IRQ_BIT_PCS = 6,
+ AHCI_PORT_IRQ_BIT_DMPS = 7,
+ /* RESERVED */
+ AHCI_PORT_IRQ_BIT_PRCS = 22,
+ AHCI_PORT_IRQ_BIT_IPMS = 23,
+ AHCI_PORT_IRQ_BIT_OFS = 24,
+ /* RESERVED */
+ AHCI_PORT_IRQ_BIT_INFS = 26,
+ AHCI_PORT_IRQ_BIT_IFS = 27,
+ AHCI_PORT_IRQ_BIT_HBDS = 28,
+ AHCI_PORT_IRQ_BIT_HBFS = 29,
+ AHCI_PORT_IRQ_BIT_TFES = 30,
+ AHCI_PORT_IRQ_BIT_CPDS = 31,
+ AHCI_PORT_IRQ__COUNT = 32
+};
+
+
+/* PORT_IRQ_{STAT,MASK} bits */
+#define PORT_IRQ_COLD_PRES (1U << 31) /* cold presence detect */
+#define PORT_IRQ_TF_ERR (1 << 30) /* task file error */
+#define PORT_IRQ_HBUS_ERR (1 << 29) /* host bus fatal error */
+#define PORT_IRQ_HBUS_DATA_ERR (1 << 28) /* host bus data error */
+#define PORT_IRQ_IF_ERR (1 << 27) /* interface fatal error */
+#define PORT_IRQ_IF_NONFATAL (1 << 26) /* interface non-fatal error */
+ /* reserved */
+#define PORT_IRQ_OVERFLOW (1 << 24) /* xfer exhausted available S/G */
+#define PORT_IRQ_BAD_PMP (1 << 23) /* incorrect port multiplier */
+#define PORT_IRQ_PHYRDY (1 << 22) /* PhyRdy changed */
+ /* reserved */
+#define PORT_IRQ_DEV_ILCK (1 << 7) /* device interlock */
+#define PORT_IRQ_CONNECT (1 << 6) /* port connect change status */
+#define PORT_IRQ_SG_DONE (1 << 5) /* descriptor processed */
+#define PORT_IRQ_UNK_FIS (1 << 4) /* unknown FIS rx'd */
+#define PORT_IRQ_SDB_FIS (1 << 3) /* Set Device Bits FIS rx'd */
+#define PORT_IRQ_DMAS_FIS (1 << 2) /* DMA Setup FIS rx'd */
+#define PORT_IRQ_PIOS_FIS (1 << 1) /* PIO Setup FIS rx'd */
+#define PORT_IRQ_D2H_REG_FIS (1 << 0) /* D2H Register FIS rx'd */
+
+#define PORT_IRQ_FREEZE (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | \
+ PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY | \
+ PORT_IRQ_UNK_FIS)
+#define PORT_IRQ_ERROR (PORT_IRQ_FREEZE | PORT_IRQ_TF_ERR | \
+ PORT_IRQ_HBUS_DATA_ERR)
+#define DEF_PORT_IRQ (PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | \
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | \
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
+
+/* PORT_CMD bits */
+#define PORT_CMD_ATAPI (1 << 24) /* Device is ATAPI */
+#define PORT_CMD_LIST_ON (1 << 15) /* cmd list DMA engine running */
+#define PORT_CMD_FIS_ON (1 << 14) /* FIS DMA engine running */
+#define PORT_CMD_FIS_RX (1 << 4) /* Enable FIS receive DMA engine */
+#define PORT_CMD_CLO (1 << 3) /* Command list override */
+#define PORT_CMD_POWER_ON (1 << 2) /* Power up device */
+#define PORT_CMD_SPIN_UP (1 << 1) /* Spin up device */
+#define PORT_CMD_START (1 << 0) /* Enable port DMA engine */
+
+#define PORT_CMD_ICC_MASK (0xfU << 28) /* i/f ICC state mask */
+#define PORT_CMD_ICC_ACTIVE (0x1 << 28) /* Put i/f in active state */
+#define PORT_CMD_ICC_PARTIAL (0x2 << 28) /* Put i/f in partial state */
+#define PORT_CMD_ICC_SLUMBER (0x6 << 28) /* Put i/f in slumber state */
+
+#define PORT_CMD_RO_MASK 0x007dffe0 /* Which CMD bits are read only? */
+
+/* ap->flags bits */
+#define AHCI_FLAG_NO_NCQ (1 << 24)
+#define AHCI_FLAG_IGN_IRQ_IF_ERR (1 << 25) /* ignore IRQ_IF_ERR */
+#define AHCI_FLAG_HONOR_PI (1 << 26) /* honor PORTS_IMPL */
+#define AHCI_FLAG_IGN_SERR_INTERNAL (1 << 27) /* ignore SERR_INTERNAL */
+#define AHCI_FLAG_32BIT_ONLY (1 << 28) /* force 32bit */
+
+#define ATA_SRST (1 << 2) /* software reset */
+
+#define STATE_RUN 0
+#define STATE_RESET 1
+
+#define SATA_SCR_SSTATUS_DET_NODEV 0x0
+#define SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP 0x3
+
+#define SATA_SCR_SSTATUS_SPD_NODEV 0x00
+#define SATA_SCR_SSTATUS_SPD_GEN1 0x10
+
+#define SATA_SCR_SSTATUS_IPM_NODEV 0x000
+#define SATA_SCR_SSTATUS_IPM_ACTIVE 0x100
+
+#define AHCI_SCR_SCTL_DET 0xf
+
+#define SATA_FIS_TYPE_REGISTER_H2D 0x27
+#define SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER 0x80
+#define SATA_FIS_TYPE_REGISTER_D2H 0x34
+#define SATA_FIS_TYPE_PIO_SETUP 0x5f
+#define SATA_FIS_TYPE_SDB 0xA1
+
+#define AHCI_CMD_HDR_CMD_FIS_LEN 0x1f
+#define AHCI_CMD_HDR_PRDT_LEN 16
+
+#define SATA_SIGNATURE_CDROM 0xeb140101
+#define SATA_SIGNATURE_DISK 0x00000101
+
+#define AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR 0x2c
+
+#define AHCI_PORT_REGS_START_ADDR 0x100
+#define AHCI_PORT_ADDR_OFFSET_MASK 0x7f
+#define AHCI_PORT_ADDR_OFFSET_LEN 0x80
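+
+/*
+ * Illustrative note, not part of the original header: each port's register
+ * block starts at AHCI_PORT_REGS_START_ADDR + port * AHCI_PORT_ADDR_OFFSET_LEN,
+ * with each 32-bit register at (index * 4) inside that window. For example,
+ * PxCMD (AHCI_PORT_REG_CMD = 6) of port 1 lives at 0x100 + 0x80 + 0x18 = 0x198.
+ */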
+
+#define AHCI_NUM_COMMAND_SLOTS 31
+#define AHCI_SUPPORTED_SPEED 20
+#define AHCI_SUPPORTED_SPEED_GEN1 1
+#define AHCI_VERSION_1_0 0x10000
+
+#define AHCI_PROGMODE_MAJOR_REV_1 1
+
+#define AHCI_COMMAND_TABLE_ACMD 0x40
+
+#define AHCI_PRDT_SIZE_MASK 0x3fffff
+
+#define IDE_FEATURE_DMA 1
+
+#define READ_FPDMA_QUEUED 0x60
+#define WRITE_FPDMA_QUEUED 0x61
+#define NCQ_NON_DATA 0x63
+#define RECEIVE_FPDMA_QUEUED 0x65
+#define SEND_FPDMA_QUEUED 0x64
+
+#define NCQ_FIS_FUA_MASK 0x80
+#define NCQ_FIS_RARC_MASK 0x01
+
+#define RES_FIS_DSFIS 0x00
+#define RES_FIS_PSFIS 0x20
+#define RES_FIS_RFIS 0x40
+#define RES_FIS_SDBFIS 0x58
+#define RES_FIS_UFIS 0x60
+
+#define SATA_CAP_SIZE 0x8
+#define SATA_CAP_REV 0x2
+#define SATA_CAP_BAR 0x4
+
+typedef struct AHCIPortRegs {
+ uint32_t lst_addr;
+ uint32_t lst_addr_hi;
+ uint32_t fis_addr;
+ uint32_t fis_addr_hi;
+ uint32_t irq_stat;
+ uint32_t irq_mask;
+ uint32_t cmd;
+ uint32_t unused0;
+ uint32_t tfdata;
+ uint32_t sig;
+ uint32_t scr_stat;
+ uint32_t scr_ctl;
+ uint32_t scr_err;
+ uint32_t scr_act;
+ uint32_t cmd_issue;
+ uint32_t reserved;
+} AHCIPortRegs;
+
+typedef struct AHCICmdHdr {
+ uint16_t opts;
+ uint16_t prdtl;
+ uint32_t status;
+ uint64_t tbl_addr;
+ uint32_t reserved[4];
+} QEMU_PACKED AHCICmdHdr;
+
+typedef struct AHCI_SG {
+ uint64_t addr;
+ uint32_t reserved;
+ uint32_t flags_size;
+} QEMU_PACKED AHCI_SG;
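+
+/*
+ * Illustrative sketch, not part of the original header: decoding one PRDT
+ * entry. The low 22 bits of flags_size hold the data byte count minus one,
+ * and bit 31 (AHCI_IRQ_ON_SG) requests an interrupt when the entry completes.
+ * le32_to_cpu() is assumed to be visible here via QEMU's usual headers.
+ */
+static inline uint32_t ahci_sg_byte_count(const AHCI_SG *sg)
+{
+ return (le32_to_cpu(sg->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
+}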
+
+typedef struct NCQTransferState {
+ AHCIDevice *drive;
+ BlockAIOCB *aiocb;
+ AHCICmdHdr *cmdh;
+ QEMUSGList sglist;
+ BlockAcctCookie acct;
+ uint32_t sector_count;
+ uint64_t lba;
+ uint8_t tag;
+ uint8_t cmd;
+ uint8_t slot;
+ bool used;
+ bool halt;
+} NCQTransferState;
+
+struct AHCIDevice {
+ IDEDMA dma;
+ IDEBus port;
+ int port_no;
+ uint32_t port_state;
+ uint32_t finished;
+ AHCIPortRegs port_regs;
+ struct AHCIState *hba;
+ QEMUBH *check_bh;
+ uint8_t *lst;
+ uint8_t *res_fis;
+ bool done_first_drq;
+ int32_t busy_slot;
+ bool init_d2h_sent;
+ AHCICmdHdr *cur_cmd;
+ NCQTransferState ncq_tfs[AHCI_MAX_CMDS];
+};
+
+struct AHCIPCIState {
+ /*< private >*/
+ PCIDevice parent_obj;
+ /*< public >*/
+
+ AHCIState ahci;
+};
+
+extern const VMStateDescription vmstate_ahci;
+
+#define VMSTATE_AHCI(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(AHCIState), \
+ .vmsd = &vmstate_ahci, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, AHCIState), \
+}
+
+/**
+ * NCQFrame is the same as a Register H2D FIS (described in SATA 3.2),
+ * but some fields have been re-mapped and re-purposed, as seen in
+ * SATA 3.2 section 13.6.4.1 ("READ FPDMA QUEUED")
+ *
+ * cmd_fis[3], feature 7:0, becomes sector count 7:0.
+ * cmd_fis[7], device 7:0, uses bit 7 as the Force Unit Access bit.
+ * cmd_fis[11], feature 15:8, becomes sector count 15:8.
+ * cmd_fis[12], count 7:0, becomes the NCQ TAG (7:3) and RARC bit (0)
+ * cmd_fis[13], count 15:8, becomes the priority value (7:6)
+ * bytes 16-19 become an le32 "auxiliary" field.
+ */
+typedef struct NCQFrame {
+ uint8_t fis_type;
+ uint8_t c;
+ uint8_t command;
+ uint8_t sector_count_low; /* (feature 7:0) */
+ uint8_t lba0;
+ uint8_t lba1;
+ uint8_t lba2;
+ uint8_t fua; /* (device 7:0) */
+ uint8_t lba3;
+ uint8_t lba4;
+ uint8_t lba5;
+ uint8_t sector_count_high; /* (feature 15:8) */
+ uint8_t tag; /* (count 7:0) */
+ uint8_t prio; /* (count 15:8) */
+ uint8_t icc;
+ uint8_t control;
+ uint8_t aux0;
+ uint8_t aux1;
+ uint8_t aux2;
+ uint8_t aux3;
+} QEMU_PACKED NCQFrame;
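+
+/*
+ * Illustrative sketch, not part of the original header: how the re-purposed
+ * NCQFrame fields described above would be decoded. The NCQ tag sits in
+ * bits 7:3 of the count byte and the sector count is split across the two
+ * feature bytes.
+ */
+static inline uint8_t ncq_frame_tag(const NCQFrame *fis)
+{
+ return fis->tag >> 3; /* NCQ TAG, bits 7:3 */
+}
+
+static inline uint16_t ncq_frame_sector_count(const NCQFrame *fis)
+{
+ return fis->sector_count_low | (fis->sector_count_high << 8);
+}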
+
+typedef struct SDBFIS {
+ uint8_t type;
+ uint8_t flags;
+ uint8_t status;
+ uint8_t error;
+ uint32_t payload;
+} QEMU_PACKED SDBFIS;
+
+void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports);
+void ahci_init(AHCIState *s, DeviceState *qdev);
+void ahci_uninit(AHCIState *s);
+
+void ahci_reset(AHCIState *s);
+
+#endif /* HW_IDE_AHCI_INTERNAL_H */
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
new file mode 100644
index 000000000..b626199e3
--- /dev/null
+++ b/hw/ide/atapi.c
@@ -0,0 +1,1377 @@
+/*
+ * QEMU ATAPI Emulation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ide/internal.h"
+#include "hw/scsi/scsi.h"
+#include "sysemu/block-backend.h"
+#include "trace.h"
+
+#define ATAPI_SECTOR_BITS (2 + BDRV_SECTOR_BITS)
+#define ATAPI_SECTOR_SIZE (1 << ATAPI_SECTOR_BITS)
+
+static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret);
+
+static void padstr8(uint8_t *buf, int buf_size, const char *src)
+{
+ int i;
+ for(i = 0; i < buf_size; i++) {
+ if (*src)
+ buf[i] = *src++;
+ else
+ buf[i] = ' ';
+ }
+}
+
+static void lba_to_msf(uint8_t *buf, int lba)
+{
+ lba += 150;
+ buf[0] = (lba / 75) / 60;
+ buf[1] = (lba / 75) % 60;
+ buf[2] = lba % 75;
+}
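+
+/*
+ * Worked example, not part of the original source: the 150-sector offset is
+ * the standard two-second lead-in, so LBA 0 maps to MSF 00:02:00 and LBA 16
+ * maps to MSF 00:02:16 (16 + 150 = 166; 166 / 75 = 2 seconds, 166 % 75 = 16
+ * frames).
+ */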
+
+static inline int media_present(IDEState *s)
+{
+ return !s->tray_open && s->nb_sectors > 0;
+}
+
+/* XXX: DVDs that could fit on a CD will be reported as a CD */
+static inline int media_is_dvd(IDEState *s)
+{
+ return (media_present(s) && s->nb_sectors > CD_MAX_SECTORS);
+}
+
+static inline int media_is_cd(IDEState *s)
+{
+ return (media_present(s) && s->nb_sectors <= CD_MAX_SECTORS);
+}
+
+static void cd_data_to_raw(uint8_t *buf, int lba)
+{
+ /* sync bytes */
+ buf[0] = 0x00;
+ memset(buf + 1, 0xff, 10);
+ buf[11] = 0x00;
+ buf += 12;
+ /* MSF */
+ lba_to_msf(buf, lba);
+ buf[3] = 0x01; /* mode 1 data */
+ buf += 4;
+ /* data */
+ buf += 2048;
+ /* XXX: ECC not computed */
+ memset(buf, 0, 288);
+}
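+
+/*
+ * Illustrative note, not part of the original source: the raw sector built
+ * above is 12 sync bytes + 3 MSF bytes + 1 mode byte + 2048 data bytes +
+ * 288 EDC/ECC bytes (left zeroed) = 2352 bytes, matching the 2352-byte
+ * cd_sector_size handled by the callers below.
+ */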
+
+static int
+cd_read_sector_sync(IDEState *s)
+{
+ int ret;
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ ATAPI_SECTOR_SIZE, BLOCK_ACCT_READ);
+
+ trace_cd_read_sector_sync(s->lba);
+
+ switch (s->cd_sector_size) {
+ case 2048:
+ ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS,
+ s->io_buffer, ATAPI_SECTOR_SIZE);
+ break;
+ case 2352:
+ ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS,
+ s->io_buffer + 16, ATAPI_SECTOR_SIZE);
+ if (ret >= 0) {
+ cd_data_to_raw(s->io_buffer, s->lba);
+ }
+ break;
+ default:
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
+ return -EIO;
+ }
+
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ s->lba++;
+ s->io_buffer_index = 0;
+ }
+
+ return ret;
+}
+
+static void cd_read_sector_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+
+ trace_cd_read_sector_cb(s->lba, ret);
+
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ ide_atapi_io_error(s, ret);
+ return;
+ }
+
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+
+ if (s->cd_sector_size == 2352) {
+ cd_data_to_raw(s->io_buffer, s->lba);
+ }
+
+ s->lba++;
+ s->io_buffer_index = 0;
+ s->status &= ~BUSY_STAT;
+
+ ide_atapi_cmd_reply_end(s);
+}
+
+static int cd_read_sector(IDEState *s)
+{
+ void *buf;
+
+ if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) {
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
+ return -EINVAL;
+ }
+
+ buf = (s->cd_sector_size == 2352) ? s->io_buffer + 16 : s->io_buffer;
+ qemu_iovec_init_buf(&s->qiov, buf, ATAPI_SECTOR_SIZE);
+
+ trace_cd_read_sector(s->lba);
+
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ ATAPI_SECTOR_SIZE, BLOCK_ACCT_READ);
+
+ ide_buffered_readv(s, (int64_t)s->lba << 2, &s->qiov, 4,
+ cd_read_sector_cb, s);
+
+ s->status |= BUSY_STAT;
+ return 0;
+}
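+
+/*
+ * Editorial note, inferred from ATAPI_SECTOR_BITS above: a CD logical block
+ * is 2048 bytes, i.e. four 512-byte block-layer sectors, which is why the
+ * read above passes "s->lba << 2" as the sector offset and a count of 4
+ * sectors per CD block.
+ */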
+
+void ide_atapi_cmd_ok(IDEState *s)
+{
+ s->error = 0;
+ s->status = READY_STAT | SEEK_STAT;
+ s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
+ ide_transfer_stop(s);
+ ide_set_irq(s->bus);
+}
+
+void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc)
+{
+ trace_ide_atapi_cmd_error(s, sense_key, asc);
+ s->error = sense_key << 4;
+ s->status = READY_STAT | ERR_STAT;
+ s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
+ s->sense_key = sense_key;
+ s->asc = asc;
+ ide_transfer_stop(s);
+ ide_set_irq(s->bus);
+}
+
+void ide_atapi_io_error(IDEState *s, int ret)
+{
+ /* XXX: handle more errors */
+ if (ret == -ENOMEDIUM) {
+ ide_atapi_cmd_error(s, NOT_READY,
+ ASC_MEDIUM_NOT_PRESENT);
+ } else {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_LOGICAL_BLOCK_OOR);
+ }
+}
+
+static uint16_t atapi_byte_count_limit(IDEState *s)
+{
+ uint16_t bcl;
+
+ bcl = s->lcyl | (s->hcyl << 8);
+ if (bcl == 0xffff) {
+ return 0xfffe;
+ }
+ return bcl;
+}
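+
+/*
+ * Illustrative note, not part of the original source: the limit is the
+ * 16-bit value the guest programs into the lcyl/hcyl registers before
+ * issuing PACKET, e.g. lcyl = 0xfe, hcyl = 0xff yields 0xfffe; the all-ones
+ * value is clamped to 0xfffe above.
+ */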
+
+/* The whole ATAPI transfer logic is handled in this function */
+void ide_atapi_cmd_reply_end(IDEState *s)
+{
+ int byte_count_limit, size, ret;
+ while (s->packet_transfer_size > 0) {
+ trace_ide_atapi_cmd_reply_end(s, s->packet_transfer_size,
+ s->elementary_transfer_size,
+ s->io_buffer_index);
+
+ /* see if a new sector must be read */
+ if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) {
+ if (!s->elementary_transfer_size) {
+ ret = cd_read_sector(s);
+ if (ret < 0) {
+ ide_atapi_io_error(s, ret);
+ }
+ return;
+ } else {
+ /* rebuffering within an elementary transfer is
+ * only possible with a sync request because we
+ * end up with a race condition otherwise */
+ ret = cd_read_sector_sync(s);
+ if (ret < 0) {
+ ide_atapi_io_error(s, ret);
+ return;
+ }
+ }
+ }
+ if (s->elementary_transfer_size > 0) {
+ /* there are some data left to transmit in this elementary
+ transfer */
+ size = s->cd_sector_size - s->io_buffer_index;
+ if (size > s->elementary_transfer_size)
+ size = s->elementary_transfer_size;
+ } else {
+ /* a new transfer is needed */
+ s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO;
+ ide_set_irq(s->bus);
+ byte_count_limit = atapi_byte_count_limit(s);
+ trace_ide_atapi_cmd_reply_end_bcl(s, byte_count_limit);
+ size = s->packet_transfer_size;
+ if (size > byte_count_limit) {
+ /* byte count limit must be even in this case */
+ if (byte_count_limit & 1)
+ byte_count_limit--;
+ size = byte_count_limit;
+ }
+ s->lcyl = size;
+ s->hcyl = size >> 8;
+ s->elementary_transfer_size = size;
+ /* we cannot transmit more than one sector at a time */
+ if (s->lba != -1) {
+ if (size > (s->cd_sector_size - s->io_buffer_index))
+ size = (s->cd_sector_size - s->io_buffer_index);
+ }
+ trace_ide_atapi_cmd_reply_end_new(s, s->status);
+ }
+ s->packet_transfer_size -= size;
+ s->elementary_transfer_size -= size;
+ s->io_buffer_index += size;
+ assert(size <= s->io_buffer_total_len);
+ assert(s->io_buffer_index <= s->io_buffer_total_len);
+
+ /* Some adapters process PIO data right away. In that case, we need
+ * to avoid mutual recursion between ide_transfer_start
+ * and ide_atapi_cmd_reply_end.
+ */
+ if (!ide_transfer_start_norecurse(s,
+ s->io_buffer + s->io_buffer_index - size,
+ size, ide_atapi_cmd_reply_end)) {
+ return;
+ }
+ }
+
+ /* end of transfer */
+ trace_ide_atapi_cmd_reply_end_eot(s, s->status);
+ ide_atapi_cmd_ok(s);
+ ide_set_irq(s->bus);
+}
+
+/* send a reply of 'size' bytes in s->io_buffer to an ATAPI command */
+static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size)
+{
+ if (size > max_size)
+ size = max_size;
+ s->lba = -1; /* no sector read */
+ s->packet_transfer_size = size;
+ s->io_buffer_size = size; /* dma: send the reply data as one chunk */
+ s->elementary_transfer_size = 0;
+
+ if (s->atapi_dma) {
+ block_acct_start(blk_get_stats(s->blk), &s->acct, size,
+ BLOCK_ACCT_READ);
+ s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
+ ide_start_dma(s, ide_atapi_cmd_read_dma_cb);
+ } else {
+ s->status = READY_STAT | SEEK_STAT;
+ s->io_buffer_index = 0;
+ ide_atapi_cmd_reply_end(s);
+ }
+}
+
+/* start a CD-ROM read command */
+static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors,
+ int sector_size)
+{
+ assert(0 <= lba && lba < (s->nb_sectors >> 2));
+
+ s->lba = lba;
+ s->packet_transfer_size = nb_sectors * sector_size;
+ s->elementary_transfer_size = 0;
+ s->io_buffer_index = sector_size;
+ s->cd_sector_size = sector_size;
+
+ ide_atapi_cmd_reply_end(s);
+}
+
+static void ide_atapi_cmd_check_status(IDEState *s)
+{
+ trace_ide_atapi_cmd_check_status(s);
+ s->error = MC_ERR | (UNIT_ATTENTION << 4);
+ s->status = ERR_STAT;
+ s->nsector = 0;
+ ide_set_irq(s->bus);
+}
+/* ATAPI DMA support */
+
+static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+ int data_offset, n;
+
+ if (ret < 0) {
+ if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
+ if (s->bus->error_status) {
+ s->bus->dma->aiocb = NULL;
+ return;
+ }
+ goto eot;
+ }
+ }
+
+ if (s->io_buffer_size > 0) {
+ /*
+ * For a cdrom read sector command (s->lba != -1),
+ * adjust the lba for the next s->io_buffer_size chunk
+ * and dma the current chunk.
+ * For a command != read (s->lba == -1), just transfer
+ * the reply data.
+ */
+ if (s->lba != -1) {
+ if (s->cd_sector_size == 2352) {
+ n = 1;
+ cd_data_to_raw(s->io_buffer, s->lba);
+ } else {
+ n = s->io_buffer_size >> 11;
+ }
+ s->lba += n;
+ }
+ s->packet_transfer_size -= s->io_buffer_size;
+ if (s->bus->dma->ops->rw_buf(s->bus->dma, 1) == 0)
+ goto eot;
+ }
+
+ if (s->packet_transfer_size <= 0) {
+ s->status = READY_STAT | SEEK_STAT;
+ s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
+ ide_set_irq(s->bus);
+ goto eot;
+ }
+
+ s->io_buffer_index = 0;
+ if (s->cd_sector_size == 2352) {
+ n = 1;
+ s->io_buffer_size = s->cd_sector_size;
+ data_offset = 16;
+ } else {
+ n = s->packet_transfer_size >> 11;
+ if (n > (IDE_DMA_BUF_SECTORS / 4))
+ n = (IDE_DMA_BUF_SECTORS / 4);
+ s->io_buffer_size = n * 2048;
+ data_offset = 0;
+ }
+ trace_ide_atapi_cmd_read_dma_cb_aio(s, s->lba, n);
+ qemu_iovec_init_buf(&s->bus->dma->qiov, s->io_buffer + data_offset,
+ n * ATAPI_SECTOR_SIZE);
+
+ s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2,
+ &s->bus->dma->qiov, n * 4,
+ ide_atapi_cmd_read_dma_cb, s);
+ return;
+
+eot:
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
+ ide_set_inactive(s, false);
+}
+
+/* start a CD-ROM read command with DMA */
+/* XXX: test if DMA is available */
+static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors,
+ int sector_size)
+{
+ assert(0 <= lba && lba < (s->nb_sectors >> 2));
+
+ s->lba = lba;
+ s->packet_transfer_size = nb_sectors * sector_size;
+ s->io_buffer_size = 0;
+ s->cd_sector_size = sector_size;
+
+ block_acct_start(blk_get_stats(s->blk), &s->acct, s->packet_transfer_size,
+ BLOCK_ACCT_READ);
+
+ /* XXX: check if BUSY_STAT should be set */
+ s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
+ ide_start_dma(s, ide_atapi_cmd_read_dma_cb);
+}
+
+static void ide_atapi_cmd_read(IDEState *s, int lba, int nb_sectors,
+ int sector_size)
+{
+ trace_ide_atapi_cmd_read(s, s->atapi_dma ? "dma" : "pio",
+ lba, nb_sectors);
+ if (s->atapi_dma) {
+ ide_atapi_cmd_read_dma(s, lba, nb_sectors, sector_size);
+ } else {
+ ide_atapi_cmd_read_pio(s, lba, nb_sectors, sector_size);
+ }
+}
+
+void ide_atapi_dma_restart(IDEState *s)
+{
+ /*
+ * At this point we can just re-evaluate the packet command and start over.
+ * The presence of ->dma_cb callback in the pre_save ensures that the packet
+ * command has been completely sent and we can safely restart the command.
+ */
+ s->unit = s->bus->retry_unit;
+ s->bus->dma->ops->restart_dma(s->bus->dma);
+ ide_atapi_cmd(s);
+}
+
+static inline uint8_t ide_atapi_set_profile(uint8_t *buf, uint8_t *index,
+ uint16_t profile)
+{
+ uint8_t *buf_profile = buf + 12; /* start of profiles */
+
+ buf_profile += ((*index) * 4); /* start of indexed profile */
+ stw_be_p(buf_profile, profile);
+ buf_profile[2] = ((buf_profile[0] == buf[6]) && (buf_profile[1] == buf[7]));
+
+ /* each profile adds 4 bytes to the response */
+ (*index)++;
+ buf[11] += 4; /* Additional Length */
+
+ return 4;
+}
+
+static int ide_dvd_read_structure(IDEState *s, int format,
+ const uint8_t *packet, uint8_t *buf)
+{
+ switch (format) {
+ case 0x0: /* Physical format information */
+ {
+ int layer = packet[6];
+ uint64_t total_sectors;
+
+ if (layer != 0)
+ return -ASC_INV_FIELD_IN_CMD_PACKET;
+
+ total_sectors = s->nb_sectors >> 2;
+ if (total_sectors == 0) {
+ return -ASC_MEDIUM_NOT_PRESENT;
+ }
+
+ buf[4] = 1; /* DVD-ROM, part version 1 */
+ buf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
+ buf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
+ buf[7] = 0; /* default densities */
+
+ /* FIXME: 0x30000 per spec? */
+ stl_be_p(buf + 8, 0); /* start sector */
+ stl_be_p(buf + 12, total_sectors - 1); /* end sector */
+ stl_be_p(buf + 16, total_sectors - 1); /* l0 end sector */
+
+ /* Size of buffer, not including 2 byte size field */
+ stw_be_p(buf, 2048 + 2);
+
+ /* 2k data + 4 byte header */
+ return (2048 + 4);
+ }
+
+ case 0x01: /* DVD copyright information */
+ buf[4] = 0; /* no copyright data */
+ buf[5] = 0; /* no region restrictions */
+
+ /* Size of buffer, not including 2 byte size field */
+ stw_be_p(buf, 4 + 2);
+
+ /* 4 byte header + 4 byte data */
+ return (4 + 4);
+
+ case 0x03: /* BCA information - invalid field for no BCA info */
+ return -ASC_INV_FIELD_IN_CMD_PACKET;
+
+ case 0x04: /* DVD disc manufacturing information */
+ /* Size of buffer, not including 2 byte size field */
+ stw_be_p(buf, 2048 + 2);
+
+ /* 2k data + 4 byte header */
+ return (2048 + 4);
+
+ case 0xff:
+ /*
+ * This lists all the command capabilities above. Add new ones
+ * in order and update the length and buffer return values.
+ */
+
+ buf[4] = 0x00; /* Physical format */
+ buf[5] = 0x40; /* Not writable, is readable */
+ stw_be_p(buf + 6, 2048 + 4);
+
+ buf[8] = 0x01; /* Copyright info */
+ buf[9] = 0x40; /* Not writable, is readable */
+ stw_be_p(buf + 10, 4 + 4);
+
+ buf[12] = 0x03; /* BCA info */
+ buf[13] = 0x40; /* Not writable, is readable */
+ stw_be_p(buf + 14, 188 + 4);
+
+ buf[16] = 0x04; /* Manufacturing info */
+ buf[17] = 0x40; /* Not writable, is readable */
+ stw_be_p(buf + 18, 2048 + 4);
+
+ /* Size of buffer, not including 2 byte size field */
+ stw_be_p(buf, 16 + 2);
+
+ /* data written + 4 byte header */
+ return (16 + 4);
+
+ default: /* TODO: formats beyond DVD-ROM are not supported yet */
+ return -ASC_INV_FIELD_IN_CMD_PACKET;
+ }
+}
+
+static unsigned int event_status_media(IDEState *s,
+ uint8_t *buf)
+{
+ uint8_t event_code, media_status;
+
+ media_status = 0;
+ if (s->tray_open) {
+ media_status = MS_TRAY_OPEN;
+ } else if (blk_is_inserted(s->blk)) {
+ media_status = MS_MEDIA_PRESENT;
+ }
+
+ /* Event notification descriptor */
+ event_code = MEC_NO_CHANGE;
+ if (media_status != MS_TRAY_OPEN) {
+ if (s->events.new_media) {
+ event_code = MEC_NEW_MEDIA;
+ s->events.new_media = false;
+ } else if (s->events.eject_request) {
+ event_code = MEC_EJECT_REQUESTED;
+ s->events.eject_request = false;
+ }
+ }
+
+ buf[4] = event_code;
+ buf[5] = media_status;
+
+ /* These fields are reserved, just clear them. */
+ buf[6] = 0;
+ buf[7] = 0;
+
+ return 8; /* We wrote 4 extra bytes after the header */
+}
+
+/*
+ * Before transferring data or otherwise signalling acceptance of a command
+ * marked CONDDATA, we must check the validity of the byte_count_limit.
+ */
+static bool validate_bcl(IDEState *s)
+{
+ /* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */
+ if (s->atapi_dma || atapi_byte_count_limit(s)) {
+ return true;
+ }
+
+ /* TODO: Move abort back into core.c and introduce proper error flow between
+ * ATAPI layer and IDE core layer */
+ ide_abort_command(s);
+ return false;
+}
+
+static void cmd_get_event_status_notification(IDEState *s,
+ uint8_t *buf)
+{
+ const uint8_t *packet = buf;
+
+ struct {
+ uint8_t opcode;
+ uint8_t polled; /* lsb bit is polled; others are reserved */
+ uint8_t reserved2[2];
+ uint8_t class;
+ uint8_t reserved3[2];
+ uint16_t len;
+ uint8_t control;
+ } QEMU_PACKED *gesn_cdb;
+
+ struct {
+ uint16_t len;
+ uint8_t notification_class;
+ uint8_t supported_events;
+ } QEMU_PACKED *gesn_event_header;
+ unsigned int max_len, used_len;
+
+ gesn_cdb = (void *)packet;
+ gesn_event_header = (void *)buf;
+
+ max_len = be16_to_cpu(gesn_cdb->len);
+
+ /* It is fine by the MMC spec to not support async mode operations */
+ if (!(gesn_cdb->polled & 0x01)) { /* asynchronous mode */
+ /* Only polling is supported, asynchronous mode is not. */
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ return;
+ }
+
+ /* polling mode operation */
+
+ /*
+ * These are the supported events.
+ *
+ * We currently only support requests of the 'media' type.
+ * Notification class requests and supported event classes are bitmasks,
+ * but they are built from the same values as the "notification class"
+ * field.
+ */
+ gesn_event_header->supported_events = 1 << GESN_MEDIA;
+
+ /*
+ * We use |= below to set the class field; other bits in this byte
+ * are reserved now but this is useful to do if we have to use the
+ * reserved fields later.
+ */
+ gesn_event_header->notification_class = 0;
+
+ /*
+ * Responses to requests are to be based on request priority. The
+ * notification_class_request_type enum above specifies the
+ * priority: upper elements are higher prio than lower ones.
+ */
+ if (gesn_cdb->class & (1 << GESN_MEDIA)) {
+ gesn_event_header->notification_class |= GESN_MEDIA;
+ used_len = event_status_media(s, buf);
+ } else {
+ gesn_event_header->notification_class = 0x80; /* No event available */
+ used_len = sizeof(*gesn_event_header);
+ }
+ gesn_event_header->len = cpu_to_be16(used_len
+ - sizeof(*gesn_event_header));
+ ide_atapi_cmd_reply(s, used_len, max_len);
+}
+
+static void cmd_request_sense(IDEState *s, uint8_t *buf)
+{
+ int max_len = buf[4];
+
+ memset(buf, 0, 18);
+ buf[0] = 0x70 | (1 << 7);
+ buf[2] = s->sense_key;
+ buf[7] = 10;
+ buf[12] = s->asc;
+
+ if (s->sense_key == UNIT_ATTENTION) {
+ s->sense_key = NO_SENSE;
+ }
+
+ ide_atapi_cmd_reply(s, 18, max_len);
+}
+
+static void cmd_inquiry(IDEState *s, uint8_t *buf)
+{
+ uint8_t page_code = buf[2];
+ int max_len = buf[4];
+
+ unsigned idx = 0;
+ unsigned size_idx;
+ unsigned preamble_len;
+
+ /* If the EVPD (Enable Vital Product Data) bit is set in byte 1,
+ * we are being asked for a specific page of info indicated by byte 2. */
+ if (buf[1] & 0x01) {
+ preamble_len = 4;
+ size_idx = 3;
+
+ buf[idx++] = 0x05; /* CD-ROM */
+ buf[idx++] = page_code; /* Page Code */
+ buf[idx++] = 0x00; /* reserved */
+ idx++; /* length (set later) */
+
+ switch (page_code) {
+ case 0x00:
+ /* Supported Pages: List of supported VPD responses. */
+ buf[idx++] = 0x00; /* 0x00: Supported Pages, and: */
+ buf[idx++] = 0x83; /* 0x83: Device Identification. */
+ break;
+
+ case 0x83:
+ /* Device Identification. Each entry is optional, but the entries
+ * included here are modeled after libata's VPD responses.
+ * If the response is given, at least one entry must be present. */
+
+ /* Entry 1: Serial */
+ if (idx + 24 > max_len) {
+ /* Not enough room for even the first entry: */
+ /* 4 byte header + 20 byte string */
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_DATA_PHASE_ERROR);
+ return;
+ }
+ buf[idx++] = 0x02; /* Ascii */
+ buf[idx++] = 0x00; /* Vendor Specific */
+ buf[idx++] = 0x00;
+ buf[idx++] = 20; /* Remaining length */
+ padstr8(buf + idx, 20, s->drive_serial_str);
+ idx += 20;
+
+ /* Entry 2: Drive Model and Serial */
+ if (idx + 72 > max_len) {
+ /* 4 (header) + 8 (vendor) + 60 (model & serial) */
+ goto out;
+ }
+ buf[idx++] = 0x02; /* Ascii */
+ buf[idx++] = 0x01; /* T10 Vendor */
+ buf[idx++] = 0x00;
+ buf[idx++] = 68;
+ padstr8(buf + idx, 8, "ATA"); /* Generic T10 vendor */
+ idx += 8;
+ padstr8(buf + idx, 40, s->drive_model_str);
+ idx += 40;
+ padstr8(buf + idx, 20, s->drive_serial_str);
+ idx += 20;
+
+ /* Entry 3: WWN */
+ if (s->wwn && (idx + 12 <= max_len)) {
+ /* 4 byte header + 8 byte wwn */
+ buf[idx++] = 0x01; /* Binary */
+ buf[idx++] = 0x03; /* NAA */
+ buf[idx++] = 0x00;
+ buf[idx++] = 0x08;
+ stq_be_p(&buf[idx], s->wwn);
+ idx += 8;
+ }
+ break;
+
+ default:
+ /* SPC-3, revision 23 sec. 6.4 */
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ return;
+ }
+ } else {
+ preamble_len = 5;
+ size_idx = 4;
+
+ buf[0] = 0x05; /* CD-ROM */
+ buf[1] = 0x80; /* removable */
+ buf[2] = 0x00; /* ISO */
+ buf[3] = 0x21; /* ATAPI-2 (XXX: put ATAPI-4 ?) */
+ /* buf[size_idx] set below. */
+ buf[5] = 0; /* reserved */
+ buf[6] = 0; /* reserved */
+ buf[7] = 0; /* reserved */
+ padstr8(buf + 8, 8, "QEMU");
+ padstr8(buf + 16, 16, "QEMU DVD-ROM");
+ padstr8(buf + 32, 4, s->version);
+ idx = 36;
+ }
+
+ out:
+ buf[size_idx] = idx - preamble_len;
+ ide_atapi_cmd_reply(s, idx, max_len);
+}
+
+static void cmd_get_configuration(IDEState *s, uint8_t *buf)
+{
+ uint32_t len;
+ uint8_t index = 0;
+ int max_len;
+
+ /* only feature 0 is supported */
+ if (buf[2] != 0 || buf[3] != 0) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ return;
+ }
+
+ /* XXX: could result in alignment problems in some architectures */
+ max_len = lduw_be_p(buf + 7);
+
+ /*
+ * XXX: avoid overflow for io_buffer if max_len is bigger than
+ * the size of that buffer (dimensioned to max number of
+ * sectors to transfer at once)
+ *
+ * Only a problem if the feature/profiles grow.
+ */
+ if (max_len > BDRV_SECTOR_SIZE) {
+ /* XXX: assume 1 sector */
+ max_len = BDRV_SECTOR_SIZE;
+ }
+
+ memset(buf, 0, max_len);
+ /*
+ * the number of sectors from the media tells us which profile
+ * to use as current. 0 means there is no media
+ */
+ if (media_is_dvd(s)) {
+ stw_be_p(buf + 6, MMC_PROFILE_DVD_ROM);
+ } else if (media_is_cd(s)) {
+ stw_be_p(buf + 6, MMC_PROFILE_CD_ROM);
+ }
+
+ buf[10] = 0x02 | 0x01; /* persistent and current */
+ len = 12; /* headers: 8 + 4 */
+ len += ide_atapi_set_profile(buf, &index, MMC_PROFILE_DVD_ROM);
+ len += ide_atapi_set_profile(buf, &index, MMC_PROFILE_CD_ROM);
+ stl_be_p(buf, len - 4); /* data length */
+
+ ide_atapi_cmd_reply(s, len, max_len);
+}
+
+static void cmd_mode_sense(IDEState *s, uint8_t *buf)
+{
+ int action, code;
+ int max_len;
+
+ max_len = lduw_be_p(buf + 7);
+ action = buf[2] >> 6;
+ code = buf[2] & 0x3f;
+
+ switch(action) {
+ case 0: /* current values */
+ switch(code) {
+ case MODE_PAGE_R_W_ERROR: /* error recovery */
+ stw_be_p(&buf[0], 16 - 2);
+ buf[2] = 0x70;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+ buf[6] = 0;
+ buf[7] = 0;
+
+ buf[8] = MODE_PAGE_R_W_ERROR;
+ buf[9] = 16 - 10;
+ buf[10] = 0x00;
+ buf[11] = 0x05;
+ buf[12] = 0x00;
+ buf[13] = 0x00;
+ buf[14] = 0x00;
+ buf[15] = 0x00;
+ ide_atapi_cmd_reply(s, 16, max_len);
+ break;
+ case MODE_PAGE_AUDIO_CTL:
+ stw_be_p(&buf[0], 24 - 2);
+ buf[2] = 0x70;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+ buf[6] = 0;
+ buf[7] = 0;
+
+ buf[8] = MODE_PAGE_AUDIO_CTL;
+ buf[9] = 24 - 10;
+ /* Fill with CDROM audio volume */
+ buf[17] = 0;
+ buf[19] = 0;
+ buf[21] = 0;
+ buf[23] = 0;
+
+ ide_atapi_cmd_reply(s, 24, max_len);
+ break;
+ case MODE_PAGE_CAPABILITIES:
+ stw_be_p(&buf[0], 30 - 2);
+ buf[2] = 0x70;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+ buf[6] = 0;
+ buf[7] = 0;
+
+ buf[8] = MODE_PAGE_CAPABILITIES;
+ buf[9] = 30 - 10;
+ buf[10] = 0x3b; /* read CDR/CDRW/DVDROM/DVDR/DVDRAM */
+ buf[11] = 0x00;
+
+ /* Claim PLAY_AUDIO capability (0x01) since some Linux
+ code checks for this to automount media. */
+ buf[12] = 0x71;
+ buf[13] = 3 << 5;
+ buf[14] = (1 << 0) | (1 << 3) | (1 << 5);
+ if (s->tray_locked) {
+ buf[14] |= 1 << 1;
+ }
+ buf[15] = 0x00; /* No volume & mute control, no changer */
+ stw_be_p(&buf[16], 704); /* 4x read speed */
+ buf[18] = 0; /* Two volume levels */
+ buf[19] = 2;
+ stw_be_p(&buf[20], 512); /* 512k buffer */
+ stw_be_p(&buf[22], 704); /* 4x read speed current */
+ buf[24] = 0;
+ buf[25] = 0;
+ buf[26] = 0;
+ buf[27] = 0;
+ buf[28] = 0;
+ buf[29] = 0;
+ ide_atapi_cmd_reply(s, 30, max_len);
+ break;
+ default:
+ goto error_cmd;
+ }
+ break;
+ case 1: /* changeable values */
+ goto error_cmd;
+ case 2: /* default values */
+ goto error_cmd;
+ default:
+ case 3: /* saved values */
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_SAVING_PARAMETERS_NOT_SUPPORTED);
+ break;
+ }
+ return;
+
+error_cmd:
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_INV_FIELD_IN_CMD_PACKET);
+}
+
+static void cmd_test_unit_ready(IDEState *s, uint8_t *buf)
+{
+ /* Not Ready Conditions are already handled in ide_atapi_cmd(), so if we
+ * come here, we know that it's ready. */
+ ide_atapi_cmd_ok(s);
+}
+
+static void cmd_prevent_allow_medium_removal(IDEState *s, uint8_t* buf)
+{
+ s->tray_locked = buf[4] & 1;
+ blk_lock_medium(s->blk, buf[4] & 1);
+ ide_atapi_cmd_ok(s);
+}
+
+static void cmd_read(IDEState *s, uint8_t* buf)
+{
+ unsigned int nb_sectors, lba;
+
+ /* Total logical sectors of ATAPI_SECTOR_SIZE(=2048) bytes */
+ uint64_t total_sectors = s->nb_sectors >> 2;
+
+ if (buf[0] == GPCMD_READ_10) {
+ nb_sectors = lduw_be_p(buf + 7);
+ } else {
+ nb_sectors = ldl_be_p(buf + 6);
+ }
+ if (nb_sectors == 0) {
+ ide_atapi_cmd_ok(s);
+ return;
+ }
+
+ lba = ldl_be_p(buf + 2);
+ if (lba >= total_sectors || lba + nb_sectors - 1 >= total_sectors) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR);
+ return;
+ }
+
+ ide_atapi_cmd_read(s, lba, nb_sectors, 2048);
+}
+
+static void cmd_read_cd(IDEState *s, uint8_t* buf)
+{
+ unsigned int nb_sectors, lba, transfer_request;
+
+ /* Total logical sectors of ATAPI_SECTOR_SIZE(=2048) bytes */
+ uint64_t total_sectors = s->nb_sectors >> 2;
+
+ nb_sectors = (buf[6] << 16) | (buf[7] << 8) | buf[8];
+ if (nb_sectors == 0) {
+ ide_atapi_cmd_ok(s);
+ return;
+ }
+
+ lba = ldl_be_p(buf + 2);
+ if (lba >= total_sectors || lba + nb_sectors - 1 >= total_sectors) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR);
+ return;
+ }
+
+ transfer_request = buf[9] & 0xf8;
+ if (transfer_request == 0x00) {
+ /* nothing */
+ ide_atapi_cmd_ok(s);
+ return;
+ }
+
+ /* Check validity of BCL before transferring data */
+ if (!validate_bcl(s)) {
+ return;
+ }
+
+ switch (transfer_request) {
+ case 0x10:
+ /* normal read */
+ ide_atapi_cmd_read(s, lba, nb_sectors, 2048);
+ break;
+ case 0xf8:
+ /* read all data */
+ ide_atapi_cmd_read(s, lba, nb_sectors, 2352);
+ break;
+ default:
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ break;
+ }
+}
+
+static void cmd_seek(IDEState *s, uint8_t* buf)
+{
+ unsigned int lba;
+ uint64_t total_sectors = s->nb_sectors >> 2;
+
+ lba = ldl_be_p(buf + 2);
+ if (lba >= total_sectors) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR);
+ return;
+ }
+
+ ide_atapi_cmd_ok(s);
+}
+
+static void cmd_start_stop_unit(IDEState *s, uint8_t* buf)
+{
+ int sense;
+ bool start = buf[4] & 1;
+ bool loej = buf[4] & 2; /* load on start, eject on !start */
+ int pwrcnd = buf[4] & 0xf0;
+
+ if (pwrcnd) {
+ /* eject/load only happens for power condition == 0 */
+ ide_atapi_cmd_ok(s);
+ return;
+ }
+
+ if (loej) {
+ if (!start && !s->tray_open && s->tray_locked) {
+ sense = blk_is_inserted(s->blk)
+ ? NOT_READY : ILLEGAL_REQUEST;
+ ide_atapi_cmd_error(s, sense, ASC_MEDIA_REMOVAL_PREVENTED);
+ return;
+ }
+
+ if (s->tray_open != !start) {
+ blk_eject(s->blk, !start);
+ s->tray_open = !start;
+ }
+ }
+
+ ide_atapi_cmd_ok(s);
+}
+
+static void cmd_mechanism_status(IDEState *s, uint8_t* buf)
+{
+ int max_len = lduw_be_p(buf + 8);
+
+ stw_be_p(buf, 0);
+ /* no current LBA */
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 1;
+ stw_be_p(buf + 6, 0);
+ ide_atapi_cmd_reply(s, 8, max_len);
+}
+
+static void cmd_read_toc_pma_atip(IDEState *s, uint8_t* buf)
+{
+ int format, msf, start_track, len;
+ int max_len;
+ uint64_t total_sectors = s->nb_sectors >> 2;
+
+ max_len = lduw_be_p(buf + 7);
+ format = buf[9] >> 6;
+ msf = (buf[1] >> 1) & 1;
+ start_track = buf[6];
+
+ switch(format) {
+ case 0:
+ len = cdrom_read_toc(total_sectors, buf, msf, start_track);
+ if (len < 0)
+ goto error_cmd;
+ ide_atapi_cmd_reply(s, len, max_len);
+ break;
+ case 1:
+ /* multi session : only a single session defined */
+ memset(buf, 0, 12);
+ buf[1] = 0x0a;
+ buf[2] = 0x01;
+ buf[3] = 0x01;
+ ide_atapi_cmd_reply(s, 12, max_len);
+ break;
+ case 2:
+ len = cdrom_read_toc_raw(total_sectors, buf, msf, start_track);
+ if (len < 0)
+ goto error_cmd;
+ ide_atapi_cmd_reply(s, len, max_len);
+ break;
+ default:
+ error_cmd:
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ }
+}
+
+static void cmd_read_cdvd_capacity(IDEState *s, uint8_t* buf)
+{
+ uint64_t total_sectors = s->nb_sectors >> 2;
+
+ /* NOTE: it is really the number of sectors minus 1 */
+ stl_be_p(buf, total_sectors - 1);
+ stl_be_p(buf + 4, 2048);
+ ide_atapi_cmd_reply(s, 8, 8);
+}
+
+static void cmd_read_disc_information(IDEState *s, uint8_t* buf)
+{
+ uint8_t type = buf[1] & 7;
+ uint32_t max_len = lduw_be_p(buf + 7);
+
+ /* Types 1/2 are only defined for Blu-Ray. */
+ if (type != 0) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ return;
+ }
+
+ memset(buf, 0, 34);
+ buf[1] = 32;
+ buf[2] = 0xe; /* last session complete, disc finalized */
+ buf[3] = 1; /* first track on disc */
+ buf[4] = 1; /* # of sessions */
+ buf[5] = 1; /* first track of last session */
+ buf[6] = 1; /* last track of last session */
+ buf[7] = 0x20; /* unrestricted use */
+ buf[8] = 0x00; /* CD-ROM or DVD-ROM */
+ /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
+ /* 12-23: not meaningful for CD-ROM or DVD-ROM */
+ /* 24-31: disc bar code */
+ /* 32: disc application code */
+ /* 33: number of OPC tables */
+
+ ide_atapi_cmd_reply(s, 34, max_len);
+}
+
+static void cmd_read_dvd_structure(IDEState *s, uint8_t* buf)
+{
+ int max_len;
+ int media = buf[1];
+ int format = buf[7];
+ int ret;
+
+ max_len = lduw_be_p(buf + 8);
+
+ if (format < 0xff) {
+ if (media_is_cd(s)) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INCOMPATIBLE_FORMAT);
+ return;
+ } else if (!media_present(s)) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ return;
+ }
+ }
+
+ memset(buf, 0, max_len > IDE_DMA_BUF_SECTORS * BDRV_SECTOR_SIZE + 4 ?
+ IDE_DMA_BUF_SECTORS * BDRV_SECTOR_SIZE + 4 : max_len);
+
+ switch (format) {
+ case 0x00 ... 0x7f:
+ case 0xff:
+ if (media == 0) {
+ ret = ide_dvd_read_structure(s, format, buf, buf);
+
+ if (ret < 0) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST, -ret);
+ } else {
+ ide_atapi_cmd_reply(s, ret, max_len);
+ }
+
+ break;
+ }
+ /* TODO: BD support, fall through for now */
+
+ /* Generic disk structures */
+ case 0x80: /* TODO: AACS volume identifier */
+ case 0x81: /* TODO: AACS media serial number */
+ case 0x82: /* TODO: AACS media identifier */
+ case 0x83: /* TODO: AACS media key block */
+ case 0x90: /* TODO: List of recognized format layers */
+ case 0xc0: /* TODO: Write protection status */
+ default:
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ break;
+ }
+}
+
+static void cmd_set_speed(IDEState *s, uint8_t* buf)
+{
+ ide_atapi_cmd_ok(s);
+}
+
+enum {
+ /*
+ * Only commands flagged as ALLOW_UA are allowed to run under a
+ * unit attention condition. (See MMC-5, section 4.1.6.1)
+ */
+ ALLOW_UA = 0x01,
+
+ /*
+ * Commands flagged with CHECK_READY can only execute if a medium is present.
+ * Otherwise they report the Not Ready Condition. (See MMC-5, section
+ * 4.1.8)
+ */
+ CHECK_READY = 0x02,
+
+ /*
+ * Commands flagged with NONDATA do not in any circumstances return
+ * any data via ide_atapi_cmd_reply. These commands are exempt from
+ * the normal byte_count_limit constraints.
+ * See ATA8-ACS3 "7.21.5 Byte Count Limit"
+ */
+ NONDATA = 0x04,
+
+ /*
+ * CONDDATA implies a command that transfers data only conditionally based
+ * on the presence of suboptions. It should be exempt from the BCL check at
+ * command validation time, but it needs to be checked at the command
+ * handler level instead.
+ */
+ CONDDATA = 0x08,
+};
+
+static const struct AtapiCmd {
+ void (*handler)(IDEState *s, uint8_t *buf);
+ int flags;
+} atapi_cmd_table[0x100] = {
+ [ 0x00 ] = { cmd_test_unit_ready, CHECK_READY | NONDATA },
+ [ 0x03 ] = { cmd_request_sense, ALLOW_UA },
+ [ 0x12 ] = { cmd_inquiry, ALLOW_UA },
+ [ 0x1b ] = { cmd_start_stop_unit, NONDATA }, /* [1] */
+ [ 0x1e ] = { cmd_prevent_allow_medium_removal, NONDATA },
+ [ 0x25 ] = { cmd_read_cdvd_capacity, CHECK_READY },
+ [ 0x28 ] = { cmd_read, /* (10) */ CHECK_READY },
+ [ 0x2b ] = { cmd_seek, CHECK_READY | NONDATA },
+ [ 0x43 ] = { cmd_read_toc_pma_atip, CHECK_READY },
+ [ 0x46 ] = { cmd_get_configuration, ALLOW_UA },
+ [ 0x4a ] = { cmd_get_event_status_notification, ALLOW_UA },
+ [ 0x51 ] = { cmd_read_disc_information, CHECK_READY },
+ [ 0x5a ] = { cmd_mode_sense, /* (10) */ 0 },
+ [ 0xa8 ] = { cmd_read, /* (12) */ CHECK_READY },
+ [ 0xad ] = { cmd_read_dvd_structure, CHECK_READY },
+ [ 0xbb ] = { cmd_set_speed, NONDATA },
+ [ 0xbd ] = { cmd_mechanism_status, 0 },
+ [ 0xbe ] = { cmd_read_cd, CHECK_READY | CONDDATA },
+ /* [1] handler detects and reports not ready condition itself */
+};
+
+void ide_atapi_cmd(IDEState *s)
+{
+ uint8_t *buf = s->io_buffer;
+ const struct AtapiCmd *cmd = &atapi_cmd_table[s->io_buffer[0]];
+
+ trace_ide_atapi_cmd(s, s->io_buffer[0]);
+
+ if (trace_event_get_state_backends(TRACE_IDE_ATAPI_CMD_PACKET)) {
+ /* Each pretty-printed byte needs two bytes and a space; */
+ char *ppacket = g_malloc(ATAPI_PACKET_SIZE * 3 + 1);
+ int i;
+ for (i = 0; i < ATAPI_PACKET_SIZE; i++) {
+ sprintf(ppacket + (i * 3), "%02x ", buf[i]);
+ }
+ trace_ide_atapi_cmd_packet(s, s->lcyl | (s->hcyl << 8), ppacket);
+ g_free(ppacket);
+ }
+
+ /*
+ * If there's a UNIT_ATTENTION condition pending, only commands flagged with
+ * ALLOW_UA are allowed to complete, with other commands getting a CHECK
+ * condition response unless a higher priority status, defined by the drive
+ * here, is pending.
+ */
+ if (s->sense_key == UNIT_ATTENTION && !(cmd->flags & ALLOW_UA)) {
+ ide_atapi_cmd_check_status(s);
+ return;
+ }
+ /*
+ * When a CD gets changed, we have to report an ejected state and
+ * then a loaded state to guests so that they detect tray
+ * open/close and media change events. Guests that do not use
+ * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
+ * states rely on this behavior.
+ */
+ if (!(cmd->flags & ALLOW_UA) &&
+ !s->tray_open && blk_is_inserted(s->blk) && s->cdrom_changed) {
+
+ if (s->cdrom_changed == 1) {
+ ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT);
+ s->cdrom_changed = 2;
+ } else {
+ ide_atapi_cmd_error(s, UNIT_ATTENTION, ASC_MEDIUM_MAY_HAVE_CHANGED);
+ s->cdrom_changed = 0;
+ }
+
+ return;
+ }
+
+ /* Report a Not Ready condition if appropriate for the command */
+ if ((cmd->flags & CHECK_READY) &&
+ (!media_present(s) || !blk_is_inserted(s->blk)))
+ {
+ ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT);
+ return;
+ }
+
+ /* Commands that don't transfer DATA permit the byte_count_limit to be 0.
+ * If this is a data-transferring PIO command and BCL is 0,
+ * we abort at the /ATA/ level, not the ATAPI level.
+ * See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */
+ if (cmd->handler && !(cmd->flags & (NONDATA | CONDDATA))) {
+ if (!validate_bcl(s)) {
+ return;
+ }
+ }
+
+ /* Execute the command */
+ if (cmd->handler) {
+ cmd->handler(s, buf);
+ return;
+ }
+
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_ILLEGAL_OPCODE);
+}
diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c
new file mode 100644
index 000000000..94c576262
--- /dev/null
+++ b/hw/ide/cmd646.c
@@ -0,0 +1,351 @@
+/*
+ * QEMU IDE Emulation: PCI cmd646 support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "hw/isa/isa.h"
+#include "sysemu/dma.h"
+#include "sysemu/reset.h"
+
+#include "hw/ide/pci.h"
+#include "trace.h"
+
+/* CMD646 specific */
+#define CFR 0x50
+#define CFR_INTR_CH0 0x04
+#define CNTRL 0x51
+#define CNTRL_EN_CH0 0x04
+#define CNTRL_EN_CH1 0x08
+#define ARTTIM23 0x57
+#define ARTTIM23_INTR_CH1 0x10
+#define MRDMODE 0x71
+#define MRDMODE_INTR_CH0 0x04
+#define MRDMODE_INTR_CH1 0x08
+#define MRDMODE_BLK_CH0 0x10
+#define MRDMODE_BLK_CH1 0x20
+#define UDIDETCR0 0x73
+#define UDIDETCR1 0x7B
+
+static void cmd646_update_irq(PCIDevice *pd);
+
+static void cmd646_update_dma_interrupts(PCIDevice *pd)
+{
+ /* Sync DMA interrupt status from UDMA interrupt status */
+ if (pd->config[MRDMODE] & MRDMODE_INTR_CH0) {
+ pd->config[CFR] |= CFR_INTR_CH0;
+ } else {
+ pd->config[CFR] &= ~CFR_INTR_CH0;
+ }
+
+ if (pd->config[MRDMODE] & MRDMODE_INTR_CH1) {
+ pd->config[ARTTIM23] |= ARTTIM23_INTR_CH1;
+ } else {
+ pd->config[ARTTIM23] &= ~ARTTIM23_INTR_CH1;
+ }
+}
+
+static void cmd646_update_udma_interrupts(PCIDevice *pd)
+{
+ /* Sync UDMA interrupt status from DMA interrupt status */
+ if (pd->config[CFR] & CFR_INTR_CH0) {
+ pd->config[MRDMODE] |= MRDMODE_INTR_CH0;
+ } else {
+ pd->config[MRDMODE] &= ~MRDMODE_INTR_CH0;
+ }
+
+ if (pd->config[ARTTIM23] & ARTTIM23_INTR_CH1) {
+ pd->config[MRDMODE] |= MRDMODE_INTR_CH1;
+ } else {
+ pd->config[MRDMODE] &= ~MRDMODE_INTR_CH1;
+ }
+}
+
+static uint64_t bmdma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ BMDMAState *bm = opaque;
+ PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
+ uint32_t val;
+
+ if (size != 1) {
+ return ((uint64_t)1 << (size * 8)) - 1;
+ }
+
+ switch(addr & 3) {
+ case 0:
+ val = bm->cmd;
+ break;
+ case 1:
+ val = pci_dev->config[MRDMODE];
+ break;
+ case 2:
+ val = bm->status;
+ break;
+ case 3:
+ if (bm == &bm->pci_dev->bmdma[0]) {
+ val = pci_dev->config[UDIDETCR0];
+ } else {
+ val = pci_dev->config[UDIDETCR1];
+ }
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ trace_bmdma_read_cmd646(addr, val);
+ return val;
+}
+
+static void bmdma_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ BMDMAState *bm = opaque;
+ PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
+
+ if (size != 1) {
+ return;
+ }
+
+ trace_bmdma_write_cmd646(addr, val);
+ switch(addr & 3) {
+ case 0:
+ bmdma_cmd_writeb(bm, val);
+ break;
+ case 1:
+ pci_dev->config[MRDMODE] =
+ (pci_dev->config[MRDMODE] & ~0x30) | (val & 0x30);
+ cmd646_update_dma_interrupts(pci_dev);
+ cmd646_update_irq(pci_dev);
+ break;
+ case 2:
+ bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ break;
+ case 3:
+ if (bm == &bm->pci_dev->bmdma[0]) {
+ pci_dev->config[UDIDETCR0] = val;
+ } else {
+ pci_dev->config[UDIDETCR1] = val;
+ }
+ break;
+ }
+}
+
+static const MemoryRegionOps cmd646_bmdma_ops = {
+ .read = bmdma_read,
+ .write = bmdma_write,
+};
+
+static void bmdma_setup_bar(PCIIDEState *d)
+{
+ BMDMAState *bm;
+ int i;
+
+ memory_region_init(&d->bmdma_bar, OBJECT(d), "cmd646-bmdma", 16);
+ for(i = 0;i < 2; i++) {
+ bm = &d->bmdma[i];
+ memory_region_init_io(&bm->extra_io, OBJECT(d), &cmd646_bmdma_ops, bm,
+ "cmd646-bmdma-bus", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io);
+ memory_region_init_io(&bm->addr_ioport, OBJECT(d),
+ &bmdma_addr_ioport_ops, bm,
+ "cmd646-bmdma-ioport", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport);
+ }
+}
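+
+/*
+ * Editorial note, not part of the original source: the 16-byte BAR built
+ * above follows the usual bus-master IDE layout, 8 bytes per channel: the
+ * command/status/vendor registers at offset i * 8 and the PRD table address
+ * port at i * 8 + 4.
+ */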
+
+static void cmd646_update_irq(PCIDevice *pd)
+{
+ int pci_level;
+
+ pci_level = ((pd->config[MRDMODE] & MRDMODE_INTR_CH0) &&
+ !(pd->config[MRDMODE] & MRDMODE_BLK_CH0)) ||
+ ((pd->config[MRDMODE] & MRDMODE_INTR_CH1) &&
+ !(pd->config[MRDMODE] & MRDMODE_BLK_CH1));
+ pci_set_irq(pd, pci_level);
+}
+
+/* the PCI irq level is the logical OR of the two channels */
+static void cmd646_set_irq(void *opaque, int channel, int level)
+{
+ PCIIDEState *d = opaque;
+ PCIDevice *pd = PCI_DEVICE(d);
+ int irq_mask;
+
+ irq_mask = MRDMODE_INTR_CH0 << channel;
+ if (level) {
+ pd->config[MRDMODE] |= irq_mask;
+ } else {
+ pd->config[MRDMODE] &= ~irq_mask;
+ }
+ cmd646_update_dma_interrupts(pd);
+ cmd646_update_irq(pd);
+}
+
+static void cmd646_reset(DeviceState *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_reset(&d->bus[i]);
+ }
+}
+
+static uint32_t cmd646_pci_config_read(PCIDevice *d,
+ uint32_t address, int len)
+{
+ return pci_default_read_config(d, address, len);
+}
+
+static void cmd646_pci_config_write(PCIDevice *d, uint32_t addr, uint32_t val,
+ int l)
+{
+ uint32_t i;
+
+ pci_default_write_config(d, addr, val, l);
+
+ for (i = addr; i < addr + l; i++) {
+ switch (i) {
+ case CFR:
+ case ARTTIM23:
+ cmd646_update_udma_interrupts(d);
+ break;
+ case MRDMODE:
+ cmd646_update_dma_interrupts(d);
+ break;
+ }
+ }
+
+ cmd646_update_irq(d);
+}
+
+/* CMD646 PCI IDE controller */
+static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ DeviceState *ds = DEVICE(dev);
+ uint8_t *pci_conf = dev->config;
+ int i;
+
+ pci_conf[PCI_CLASS_PROG] = 0x8f;
+
+ pci_conf[CNTRL] = CNTRL_EN_CH0; /* enable IDE0 */
+ if (d->secondary) {
+ /* XXX: if not enabled, really disable the secondary IDE controller */
+ pci_conf[CNTRL] |= CNTRL_EN_CH1; /* enable IDE1 */
+ }
+
+ /* Set write-to-clear interrupt bits */
+ dev->wmask[CFR] = 0x0;
+ dev->w1cmask[CFR] = CFR_INTR_CH0;
+ dev->wmask[ARTTIM23] = 0x0;
+ dev->w1cmask[ARTTIM23] = ARTTIM23_INTR_CH1;
+ dev->wmask[MRDMODE] = 0x0;
+ dev->w1cmask[MRDMODE] = MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1;
+
+ memory_region_init_io(&d->data_bar[0], OBJECT(d), &pci_ide_data_le_ops,
+ &d->bus[0], "cmd646-data0", 8);
+ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[0]);
+
+ memory_region_init_io(&d->cmd_bar[0], OBJECT(d), &pci_ide_cmd_le_ops,
+ &d->bus[0], "cmd646-cmd0", 4);
+ pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[0]);
+
+ memory_region_init_io(&d->data_bar[1], OBJECT(d), &pci_ide_data_le_ops,
+ &d->bus[1], "cmd646-data1", 8);
+ pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[1]);
+
+ memory_region_init_io(&d->cmd_bar[1], OBJECT(d), &pci_ide_cmd_le_ops,
+ &d->bus[1], "cmd646-cmd1", 4);
+ pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[1]);
+
+ bmdma_setup_bar(d);
+ pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);
+
+ /* TODO: RST# value should be 0 */
+    pci_conf[PCI_INTERRUPT_PIN] = 0x01; /* interrupt on pin 1 */
+
+ qdev_init_gpio_in(ds, cmd646_set_irq, 2);
+ for (i = 0; i < 2; i++) {
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
+ ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i));
+
+ bmdma_init(&d->bus[i], &d->bmdma[i], d);
+ d->bmdma[i].bus = &d->bus[i];
+ ide_register_restart_cb(&d->bus[i]);
+ }
+}
+
+static void pci_cmd646_ide_exitfn(PCIDevice *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ unsigned i;
+
+ for (i = 0; i < 2; ++i) {
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io);
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport);
+ }
+}
+
+static Property cmd646_ide_properties[] = {
+ DEFINE_PROP_UINT32("secondary", PCIIDEState, secondary, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void cmd646_ide_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = cmd646_reset;
+ dc->vmsd = &vmstate_ide_pci;
+ k->realize = pci_cmd646_ide_realize;
+ k->exit = pci_cmd646_ide_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_CMD;
+ k->device_id = PCI_DEVICE_ID_CMD_646;
+ k->revision = 0x07;
+ k->class_id = PCI_CLASS_STORAGE_IDE;
+ k->config_read = cmd646_pci_config_read;
+ k->config_write = cmd646_pci_config_write;
+ device_class_set_props(dc, cmd646_ide_properties);
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo cmd646_ide_info = {
+ .name = "cmd646-ide",
+ .parent = TYPE_PCI_IDE,
+ .class_init = cmd646_ide_class_init,
+};
+
+static void cmd646_ide_register_types(void)
+{
+ type_register_static(&cmd646_ide_info);
+}
+
+type_init(cmd646_ide_register_types)
diff --git a/hw/ide/core.c b/hw/ide/core.c
new file mode 100644
index 000000000..e28f8aad6
--- /dev/null
+++ b/hw/ide/core.c
@@ -0,0 +1,2946 @@
+/*
+ * QEMU IDE disk and CD/DVD-ROM Emulator
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/isa/isa.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/dma.h"
+#include "hw/block/block.h"
+#include "sysemu/block-backend.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "sysemu/replay.h"
+#include "sysemu/runstate.h"
+#include "hw/ide/internal.h"
+#include "trace.h"
+
+/* These values were based on a Seagate ST3500418AS but have been modified
+ to make more sense in QEMU */
+static const int smart_attributes[][12] = {
+ /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
+    /* raw read error rate */
+ { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
+ /* spin up */
+ { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ /* start stop count */
+ { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
+ /* remapped sectors */
+ { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
+ /* power on hours */
+ { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ /* power cycle count */
+ { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ /* airflow-temperature-celsius */
+ { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
+};
+
+const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
+ [IDE_DMA_READ] = "DMA READ",
+ [IDE_DMA_WRITE] = "DMA WRITE",
+ [IDE_DMA_TRIM] = "DMA TRIM",
+ [IDE_DMA_ATAPI] = "DMA ATAPI"
+};
+
+static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
+{
+ if ((unsigned)enval < IDE_DMA__COUNT) {
+ return IDE_DMA_CMD_lookup[enval];
+ }
+ return "DMA UNKNOWN CMD";
+}
+
+static void ide_dummy_transfer_stop(IDEState *s);
+
+static void padstr(char *str, const char *src, int len)
+{
+ int i, v;
+    for (i = 0; i < len; i++) {
+        if (*src) {
+            v = *src++;
+        } else {
+            v = ' ';
+        }
+        str[i ^ 1] = v;
+    }
+}
+
+static void put_le16(uint16_t *p, unsigned int v)
+{
+ *p = cpu_to_le16(v);
+}
+
+static void ide_identify_size(IDEState *s)
+{
+ uint16_t *p = (uint16_t *)s->identify_data;
+ int64_t nb_sectors_lba28 = s->nb_sectors;
+ if (nb_sectors_lba28 >= 1 << 28) {
+ nb_sectors_lba28 = (1 << 28) - 1;
+ }
+ put_le16(p + 60, nb_sectors_lba28);
+ put_le16(p + 61, nb_sectors_lba28 >> 16);
+ put_le16(p + 100, s->nb_sectors);
+ put_le16(p + 101, s->nb_sectors >> 16);
+ put_le16(p + 102, s->nb_sectors >> 32);
+ put_le16(p + 103, s->nb_sectors >> 48);
+}
+
+static void ide_identify(IDEState *s)
+{
+ uint16_t *p;
+ unsigned int oldsize;
+ IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
+
+ p = (uint16_t *)s->identify_data;
+ if (s->identify_set) {
+ goto fill_buffer;
+ }
+ memset(p, 0, sizeof(s->identify_data));
+
+ put_le16(p + 0, 0x0040);
+ put_le16(p + 1, s->cylinders);
+ put_le16(p + 3, s->heads);
+ put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
+ put_le16(p + 5, 512); /* XXX: retired, remove ? */
+ put_le16(p + 6, s->sectors);
+ padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
+ put_le16(p + 20, 3); /* XXX: retired, remove ? */
+ put_le16(p + 21, 512); /* cache size in sectors */
+ put_le16(p + 22, 4); /* ecc bytes */
+ padstr((char *)(p + 23), s->version, 8); /* firmware version */
+ padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
+#if MAX_MULT_SECTORS > 1
+ put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
+#endif
+ put_le16(p + 48, 1); /* dword I/O */
+ put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
+ put_le16(p + 51, 0x200); /* PIO transfer cycle */
+ put_le16(p + 52, 0x200); /* DMA transfer cycle */
+ put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
+ put_le16(p + 54, s->cylinders);
+ put_le16(p + 55, s->heads);
+ put_le16(p + 56, s->sectors);
+ oldsize = s->cylinders * s->heads * s->sectors;
+ put_le16(p + 57, oldsize);
+ put_le16(p + 58, oldsize >> 16);
+    if (s->mult_sectors) {
+        put_le16(p + 59, 0x100 | s->mult_sectors);
+    }
+ /* *(p + 60) := nb_sectors -- see ide_identify_size */
+ /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
+ put_le16(p + 62, 0x07); /* single word dma0-2 supported */
+ put_le16(p + 63, 0x07); /* mdma0-2 supported */
+ put_le16(p + 64, 0x03); /* pio3-4 supported */
+ put_le16(p + 65, 120);
+ put_le16(p + 66, 120);
+ put_le16(p + 67, 120);
+ put_le16(p + 68, 120);
+ if (dev && dev->conf.discard_granularity) {
+ put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
+ }
+
+ if (s->ncq_queues) {
+ put_le16(p + 75, s->ncq_queues - 1);
+ /* NCQ supported */
+ put_le16(p + 76, (1 << 8));
+ }
+
+ put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
+ put_le16(p + 81, 0x16); /* conforms to ata5 */
+ /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
+ put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
+ /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
+    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
+ /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
+ if (s->wwn) {
+ put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
+ } else {
+ put_le16(p + 84, (1 << 14) | 0);
+ }
+ /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
+ if (blk_enable_write_cache(s->blk)) {
+ put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
+ } else {
+ put_le16(p + 85, (1 << 14) | 1);
+ }
+ /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
+    put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
+ /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
+ if (s->wwn) {
+ put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
+ } else {
+ put_le16(p + 87, (1 << 14) | 0);
+ }
+ put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
+ put_le16(p + 93, 1 | (1 << 14) | 0x2000);
+ /* *(p + 100) := nb_sectors -- see ide_identify_size */
+ /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
+ /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
+ /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
+
+    if (dev && dev->conf.physical_block_size) {
+        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
+    }
+ if (s->wwn) {
+ /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
+ put_le16(p + 108, s->wwn >> 48);
+ put_le16(p + 109, s->wwn >> 32);
+ put_le16(p + 110, s->wwn >> 16);
+ put_le16(p + 111, s->wwn);
+ }
+ if (dev && dev->conf.discard_granularity) {
+ put_le16(p + 169, 1); /* TRIM support */
+ }
+ if (dev) {
+ put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
+ }
+
+ ide_identify_size(s);
+ s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
+}
+
+static void ide_atapi_identify(IDEState *s)
+{
+ uint16_t *p;
+
+ p = (uint16_t *)s->identify_data;
+ if (s->identify_set) {
+ goto fill_buffer;
+ }
+ memset(p, 0, sizeof(s->identify_data));
+
+ /* Removable CDROM, 50us response, 12 byte packets */
+ put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
+ padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
+ put_le16(p + 20, 3); /* buffer type */
+ put_le16(p + 21, 512); /* cache size in sectors */
+ put_le16(p + 22, 4); /* ecc bytes */
+ padstr((char *)(p + 23), s->version, 8); /* firmware version */
+ padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
+ put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
+#ifdef USE_DMA_CDROM
+ put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
+ put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
+ put_le16(p + 62, 7); /* single word dma0-2 supported */
+ put_le16(p + 63, 7); /* mdma0-2 supported */
+#else
+ put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
+ put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
+ put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
+#endif
+ put_le16(p + 64, 3); /* pio3-4 supported */
+ put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
+ put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
+ put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
+ put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
+
+ put_le16(p + 71, 30); /* in ns */
+ put_le16(p + 72, 30); /* in ns */
+
+ if (s->ncq_queues) {
+ put_le16(p + 75, s->ncq_queues - 1);
+ /* NCQ supported */
+ put_le16(p + 76, (1 << 8));
+ }
+
+ put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
+ if (s->wwn) {
+ put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
+ put_le16(p + 87, (1 << 8)); /* WWN enabled */
+ }
+
+#ifdef USE_DMA_CDROM
+ put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
+#endif
+
+ if (s->wwn) {
+ /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
+ put_le16(p + 108, s->wwn >> 48);
+ put_le16(p + 109, s->wwn >> 32);
+ put_le16(p + 110, s->wwn >> 16);
+ put_le16(p + 111, s->wwn);
+ }
+
+ s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
+}
+
+static void ide_cfata_identify_size(IDEState *s)
+{
+ uint16_t *p = (uint16_t *)s->identify_data;
+ put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
+ put_le16(p + 8, s->nb_sectors); /* Sectors per card */
+ put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
+ put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
+}
+
+static void ide_cfata_identify(IDEState *s)
+{
+ uint16_t *p;
+ uint32_t cur_sec;
+
+ p = (uint16_t *)s->identify_data;
+ if (s->identify_set) {
+ goto fill_buffer;
+ }
+ memset(p, 0, sizeof(s->identify_data));
+
+ cur_sec = s->cylinders * s->heads * s->sectors;
+
+ put_le16(p + 0, 0x848a); /* CF Storage Card signature */
+ put_le16(p + 1, s->cylinders); /* Default cylinders */
+ put_le16(p + 3, s->heads); /* Default heads */
+ put_le16(p + 6, s->sectors); /* Default sectors per track */
+ /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
+ /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
+ padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
+ put_le16(p + 22, 0x0004); /* ECC bytes */
+ padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
+ padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
+#if MAX_MULT_SECTORS > 1
+ put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
+#else
+ put_le16(p + 47, 0x0000);
+#endif
+ put_le16(p + 49, 0x0f00); /* Capabilities */
+ put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
+ put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
+ put_le16(p + 53, 0x0003); /* Translation params valid */
+ put_le16(p + 54, s->cylinders); /* Current cylinders */
+ put_le16(p + 55, s->heads); /* Current heads */
+ put_le16(p + 56, s->sectors); /* Current sectors */
+ put_le16(p + 57, cur_sec); /* Current capacity */
+ put_le16(p + 58, cur_sec >> 16); /* Current capacity */
+    if (s->mult_sectors) { /* Multiple sector setting */
+        put_le16(p + 59, 0x100 | s->mult_sectors);
+    }
+ /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
+ /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
+ put_le16(p + 63, 0x0203); /* Multiword DMA capability */
+ put_le16(p + 64, 0x0001); /* Flow Control PIO support */
+ put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
+ put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
+ put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
+ put_le16(p + 82, 0x400c); /* Command Set supported */
+ put_le16(p + 83, 0x7068); /* Command Set supported */
+ put_le16(p + 84, 0x4000); /* Features supported */
+ put_le16(p + 85, 0x000c); /* Command Set enabled */
+ put_le16(p + 86, 0x7044); /* Command Set enabled */
+ put_le16(p + 87, 0x4000); /* Features enabled */
+ put_le16(p + 91, 0x4060); /* Current APM level */
+ put_le16(p + 129, 0x0002); /* Current features option */
+ put_le16(p + 130, 0x0005); /* Reassigned sectors */
+ put_le16(p + 131, 0x0001); /* Initial power mode */
+ put_le16(p + 132, 0x0000); /* User signature */
+ put_le16(p + 160, 0x8100); /* Power requirement */
+ put_le16(p + 161, 0x8001); /* CF command set */
+
+ ide_cfata_identify_size(s);
+ s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
+}
+
+static void ide_set_signature(IDEState *s)
+{
+ s->select &= ~(ATA_DEV_HS); /* clear head */
+ /* put signature */
+ s->nsector = 1;
+ s->sector = 1;
+ if (s->drive_kind == IDE_CD) {
+ s->lcyl = 0x14;
+ s->hcyl = 0xeb;
+ } else if (s->blk) {
+ s->lcyl = 0;
+ s->hcyl = 0;
+ } else {
+ s->lcyl = 0xff;
+ s->hcyl = 0xff;
+ }
+}
+
+static bool ide_sect_range_ok(IDEState *s,
+ uint64_t sector, uint64_t nb_sectors)
+{
+ uint64_t total_sectors;
+
+ blk_get_geometry(s->blk, &total_sectors);
+ if (sector > total_sectors || nb_sectors > total_sectors - sector) {
+ return false;
+ }
+ return true;
+}
+
+typedef struct TrimAIOCB {
+ BlockAIOCB common;
+ IDEState *s;
+ QEMUBH *bh;
+ int ret;
+ QEMUIOVector *qiov;
+ BlockAIOCB *aiocb;
+ int i, j;
+} TrimAIOCB;
+
+static void trim_aio_cancel(BlockAIOCB *acb)
+{
+ TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
+
+ /* Exit the loop so ide_issue_trim_cb will not continue */
+ iocb->j = iocb->qiov->niov - 1;
+ iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
+
+ iocb->ret = -ECANCELED;
+
+ if (iocb->aiocb) {
+ blk_aio_cancel_async(iocb->aiocb);
+ iocb->aiocb = NULL;
+ }
+}
+
+static const AIOCBInfo trim_aiocb_info = {
+ .aiocb_size = sizeof(TrimAIOCB),
+ .cancel_async = trim_aio_cancel,
+};
+
+static void ide_trim_bh_cb(void *opaque)
+{
+ TrimAIOCB *iocb = opaque;
+
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+
+ qemu_bh_delete(iocb->bh);
+ iocb->bh = NULL;
+ qemu_aio_unref(iocb);
+}
+
+static void ide_issue_trim_cb(void *opaque, int ret)
+{
+ TrimAIOCB *iocb = opaque;
+ IDEState *s = iocb->s;
+
+ if (iocb->i >= 0) {
+ if (ret >= 0) {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ }
+ }
+
+ if (ret >= 0) {
+ while (iocb->j < iocb->qiov->niov) {
+ int j = iocb->j;
+ while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
+ int i = iocb->i;
+ uint64_t *buffer = iocb->qiov->iov[j].iov_base;
+
+ /* 6-byte LBA + 2-byte range per entry */
+ uint64_t entry = le64_to_cpu(buffer[i]);
+ uint64_t sector = entry & 0x0000ffffffffffffULL;
+ uint16_t count = entry >> 48;
+
+ if (count == 0) {
+ continue;
+ }
+
+ if (!ide_sect_range_ok(s, sector, count)) {
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
+ iocb->ret = -EINVAL;
+ goto done;
+ }
+
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
+
+ /* Got an entry! Submit and exit. */
+ iocb->aiocb = blk_aio_pdiscard(s->blk,
+ sector << BDRV_SECTOR_BITS,
+ count << BDRV_SECTOR_BITS,
+ ide_issue_trim_cb, opaque);
+ return;
+ }
+
+ iocb->j++;
+ iocb->i = -1;
+ }
+ } else {
+ iocb->ret = ret;
+ }
+
+done:
+ iocb->aiocb = NULL;
+ if (iocb->bh) {
+ replay_bh_schedule_event(iocb->bh);
+ }
+}
+
+BlockAIOCB *ide_issue_trim(
+ int64_t offset, QEMUIOVector *qiov,
+ BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
+{
+ IDEState *s = opaque;
+ TrimAIOCB *iocb;
+
+ iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
+ iocb->s = s;
+ iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
+ iocb->ret = 0;
+ iocb->qiov = qiov;
+ iocb->i = -1;
+ iocb->j = 0;
+ ide_issue_trim_cb(iocb, 0);
+ return &iocb->common;
+}
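+
+/*
+ * Worked example of the DSM TRIM range format decoded in
+ * ide_issue_trim_cb() above (illustrative only, values are arbitrary):
+ * each little-endian 64-bit entry packs a 48-bit starting LBA and a
+ * 16-bit sector count.
+ *
+ *   entry  = 0x0010000000001000
+ *   count  = entry >> 48                  = 0x0010 (16 sectors, 8 KiB)
+ *   sector = entry & 0x0000ffffffffffff   = 0x1000
+ *
+ * Entries with a zero count are skipped.
+ */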
+
+void ide_abort_command(IDEState *s)
+{
+ ide_transfer_stop(s);
+ s->status = READY_STAT | ERR_STAT;
+ s->error = ABRT_ERR;
+}
+
+static void ide_set_retry(IDEState *s)
+{
+ s->bus->retry_unit = s->unit;
+ s->bus->retry_sector_num = ide_get_sector(s);
+ s->bus->retry_nsector = s->nsector;
+}
+
+static void ide_clear_retry(IDEState *s)
+{
+ s->bus->retry_unit = -1;
+ s->bus->retry_sector_num = 0;
+ s->bus->retry_nsector = 0;
+}
+
+/* prepare data transfer and tell what to do after */
+bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
+ EndTransferFunc *end_transfer_func)
+{
+ s->data_ptr = buf;
+ s->data_end = buf + size;
+ ide_set_retry(s);
+ if (!(s->status & ERR_STAT)) {
+ s->status |= DRQ_STAT;
+ }
+ if (!s->bus->dma->ops->pio_transfer) {
+ s->end_transfer_func = end_transfer_func;
+ return false;
+ }
+ s->bus->dma->ops->pio_transfer(s->bus->dma);
+ return true;
+}
+
+void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
+ EndTransferFunc *end_transfer_func)
+{
+ if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
+ end_transfer_func(s);
+ }
+}
+
+static void ide_cmd_done(IDEState *s)
+{
+ if (s->bus->dma->ops->cmd_done) {
+ s->bus->dma->ops->cmd_done(s->bus->dma);
+ }
+}
+
+static void ide_transfer_halt(IDEState *s)
+{
+ s->end_transfer_func = ide_transfer_stop;
+ s->data_ptr = s->io_buffer;
+ s->data_end = s->io_buffer;
+ s->status &= ~DRQ_STAT;
+}
+
+void ide_transfer_stop(IDEState *s)
+{
+ ide_transfer_halt(s);
+ ide_cmd_done(s);
+}
+
+int64_t ide_get_sector(IDEState *s)
+{
+ int64_t sector_num;
+ if (s->select & (ATA_DEV_LBA)) {
+ if (s->lba48) {
+ sector_num = ((int64_t)s->hob_hcyl << 40) |
+ ((int64_t) s->hob_lcyl << 32) |
+ ((int64_t) s->hob_sector << 24) |
+ ((int64_t) s->hcyl << 16) |
+ ((int64_t) s->lcyl << 8) | s->sector;
+ } else {
+ /* LBA28 */
+ sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
+ (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
+ }
+ } else {
+ /* CHS */
+ sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
+ (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
+ }
+
+ return sector_num;
+}
+
+void ide_set_sector(IDEState *s, int64_t sector_num)
+{
+ unsigned int cyl, r;
+ if (s->select & (ATA_DEV_LBA)) {
+ if (s->lba48) {
+ s->sector = sector_num;
+ s->lcyl = sector_num >> 8;
+ s->hcyl = sector_num >> 16;
+ s->hob_sector = sector_num >> 24;
+ s->hob_lcyl = sector_num >> 32;
+ s->hob_hcyl = sector_num >> 40;
+ } else {
+ /* LBA28 */
+ s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
+ ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
+ s->hcyl = (sector_num >> 16);
+ s->lcyl = (sector_num >> 8);
+ s->sector = (sector_num);
+ }
+ } else {
+ /* CHS */
+ cyl = sector_num / (s->heads * s->sectors);
+ r = sector_num % (s->heads * s->sectors);
+ s->hcyl = cyl >> 8;
+ s->lcyl = cyl;
+ s->select = (s->select & ~(ATA_DEV_HS)) |
+ ((r / s->sectors) & (ATA_DEV_HS));
+ s->sector = (r % s->sectors) + 1;
+ }
+}
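+
+/*
+ * Worked example of the LBA28 register split done by ide_set_sector()
+ * and reassembled by ide_get_sector() (illustrative only):
+ *
+ *   sector_num = 0x01234567
+ *   sector     = 0x67
+ *   lcyl       = 0x45
+ *   hcyl       = 0x23
+ *   select & ATA_DEV_LBA_MSB = 0x1
+ *
+ * With ATA_DEV_LBA set, ide_get_sector() rebuilds the same 28-bit value
+ * from these four registers.
+ */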
+
+static void ide_rw_error(IDEState *s)
+{
+ ide_abort_command(s);
+ ide_set_irq(s->bus);
+}
+
+static void ide_buffered_readv_cb(void *opaque, int ret)
+{
+ IDEBufferedRequest *req = opaque;
+ if (!req->orphaned) {
+ if (!ret) {
+ assert(req->qiov.size == req->original_qiov->size);
+ qemu_iovec_from_buf(req->original_qiov, 0,
+ req->qiov.local_iov.iov_base,
+ req->original_qiov->size);
+ }
+ req->original_cb(req->original_opaque, ret);
+ }
+ QLIST_REMOVE(req, list);
+ qemu_vfree(qemu_iovec_buf(&req->qiov));
+ g_free(req);
+}
+
+#define MAX_BUFFERED_REQS 16
+
+BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
+ QEMUIOVector *iov, int nb_sectors,
+ BlockCompletionFunc *cb, void *opaque)
+{
+ BlockAIOCB *aioreq;
+ IDEBufferedRequest *req;
+ int c = 0;
+
+ QLIST_FOREACH(req, &s->buffered_requests, list) {
+ c++;
+ }
+ if (c > MAX_BUFFERED_REQS) {
+ return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
+ }
+
+ req = g_new0(IDEBufferedRequest, 1);
+ req->original_qiov = iov;
+ req->original_cb = cb;
+ req->original_opaque = opaque;
+ qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
+ iov->size);
+
+ aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
+ &req->qiov, 0, ide_buffered_readv_cb, req);
+
+ QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
+ return aioreq;
+}
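+
+/*
+ * Note on the bounce buffer above: the read is issued into a private
+ * buffer rather than the caller's qiov, so if ide_cancel_dma_sync()
+ * later marks the request as orphaned, ide_buffered_readv_cb() simply
+ * frees that buffer without ever touching the original qiov (and hence
+ * guest memory).
+ */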
+
+/**
+ * Cancel all pending DMA requests.
+ * Any buffered DMA requests are instantly canceled,
+ * but any pending unbuffered DMA requests must be waited on.
+ */
+void ide_cancel_dma_sync(IDEState *s)
+{
+ IDEBufferedRequest *req;
+
+ /* First invoke the callbacks of all buffered requests
+ * and flag those requests as orphaned. Ideally there
+ * are no unbuffered (Scatter Gather DMA Requests or
+     * write requests) pending and we can avoid draining. */
+ QLIST_FOREACH(req, &s->buffered_requests, list) {
+ if (!req->orphaned) {
+ trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
+ req->original_cb(req->original_opaque, -ECANCELED);
+ }
+ req->orphaned = true;
+ }
+
+    /*
+     * We can't cancel Scatter Gather DMA in the middle of the
+     * operation or a partial (not full) DMA transfer would reach
+     * the storage, so we wait for completion instead (we behave
+     * as if the DMA had already completed by the time the guest
+     * tried to cancel it via bmdma_cmd_writeb with BM_CMD_START
+     * not set).
+     *
+     * In the future we'll be able to safely cancel the I/O if the
+     * whole DMA operation is submitted to disk with a single aio
+     * operation using preadv/pwritev.
+     */
+ if (s->bus->dma->aiocb) {
+ trace_ide_cancel_dma_sync_remaining();
+ blk_drain(s->blk);
+ assert(s->bus->dma->aiocb == NULL);
+ }
+}
+
+static void ide_sector_read(IDEState *s);
+
+static void ide_sector_read_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+ int n;
+
+ s->pio_aiocb = NULL;
+ s->status &= ~BUSY_STAT;
+
+ if (ret != 0) {
+ if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
+ IDE_RETRY_READ)) {
+ return;
+ }
+ }
+
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+
+ n = s->nsector;
+ if (n > s->req_nb_sectors) {
+ n = s->req_nb_sectors;
+ }
+
+ ide_set_sector(s, ide_get_sector(s) + n);
+ s->nsector -= n;
+ /* Allow the guest to read the io_buffer */
+ ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
+ ide_set_irq(s->bus);
+}
+
+static void ide_sector_read(IDEState *s)
+{
+ int64_t sector_num;
+ int n;
+
+ s->status = READY_STAT | SEEK_STAT;
+ s->error = 0; /* not needed by IDE spec, but needed by Windows */
+ sector_num = ide_get_sector(s);
+ n = s->nsector;
+
+ if (n == 0) {
+ ide_transfer_stop(s);
+ return;
+ }
+
+ s->status |= BUSY_STAT;
+
+ if (n > s->req_nb_sectors) {
+ n = s->req_nb_sectors;
+ }
+
+ trace_ide_sector_read(sector_num, n);
+
+ if (!ide_sect_range_ok(s, sector_num, n)) {
+ ide_rw_error(s);
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
+ return;
+ }
+
+ qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
+
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+ s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
+ ide_sector_read_cb, s);
+}
+
+void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
+{
+ if (s->bus->dma->ops->commit_buf) {
+ s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
+ }
+ s->io_buffer_offset += tx_bytes;
+ qemu_sglist_destroy(&s->sg);
+}
+
+void ide_set_inactive(IDEState *s, bool more)
+{
+ s->bus->dma->aiocb = NULL;
+ ide_clear_retry(s);
+ if (s->bus->dma->ops->set_inactive) {
+ s->bus->dma->ops->set_inactive(s->bus->dma, more);
+ }
+ ide_cmd_done(s);
+}
+
+void ide_dma_error(IDEState *s)
+{
+ dma_buf_commit(s, 0);
+ ide_abort_command(s);
+ ide_set_inactive(s, false);
+ ide_set_irq(s->bus);
+}
+
+int ide_handle_rw_error(IDEState *s, int error, int op)
+{
+ bool is_read = (op & IDE_RETRY_READ) != 0;
+ BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
+
+ if (action == BLOCK_ERROR_ACTION_STOP) {
+ assert(s->bus->retry_unit == s->unit);
+ s->bus->error_status = op;
+ } else if (action == BLOCK_ERROR_ACTION_REPORT) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ if (IS_IDE_RETRY_DMA(op)) {
+ ide_dma_error(s);
+ } else if (IS_IDE_RETRY_ATAPI(op)) {
+ ide_atapi_io_error(s, -error);
+ } else {
+ ide_rw_error(s);
+ }
+ }
+ blk_error_action(s->blk, action, is_read, error);
+ return action != BLOCK_ERROR_ACTION_IGNORE;
+}
+
+static void ide_dma_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+ int n;
+ int64_t sector_num;
+ uint64_t offset;
+ bool stay_active = false;
+ int32_t prep_size = 0;
+
+ if (ret == -EINVAL) {
+ ide_dma_error(s);
+ return;
+ }
+
+ if (ret < 0) {
+ if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
+ s->bus->dma->aiocb = NULL;
+ dma_buf_commit(s, 0);
+ return;
+ }
+ }
+
+ if (s->io_buffer_size > s->nsector * 512) {
+ /*
+ * The PRDs were longer than needed for this request.
+ * The Active bit must remain set after the request completes.
+ */
+ n = s->nsector;
+ stay_active = true;
+ } else {
+ n = s->io_buffer_size >> 9;
+ }
+
+ sector_num = ide_get_sector(s);
+ if (n > 0) {
+ assert(n * 512 == s->sg.size);
+ dma_buf_commit(s, s->sg.size);
+ sector_num += n;
+ ide_set_sector(s, sector_num);
+ s->nsector -= n;
+ }
+
+ /* end of transfer ? */
+ if (s->nsector == 0) {
+ s->status = READY_STAT | SEEK_STAT;
+ ide_set_irq(s->bus);
+ goto eot;
+ }
+
+ /* launch next transfer */
+ n = s->nsector;
+ s->io_buffer_index = 0;
+ s->io_buffer_size = n * 512;
+ prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
+ /* prepare_buf() must succeed and respect the limit */
+ assert(prep_size >= 0 && prep_size <= n * 512);
+
+ /*
+ * Now prep_size stores the number of bytes in the sglist, and
+ * s->io_buffer_size stores the number of bytes described by the PRDs.
+ */
+
+ if (prep_size < n * 512) {
+ /*
+ * The PRDs are too short for this request. Error condition!
+ * Reset the Active bit and don't raise the interrupt.
+ */
+ s->status = READY_STAT | SEEK_STAT;
+ dma_buf_commit(s, 0);
+ goto eot;
+ }
+
+ trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
+
+ if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
+ !ide_sect_range_ok(s, sector_num, n)) {
+ ide_dma_error(s);
+ block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
+ return;
+ }
+
+ offset = sector_num << BDRV_SECTOR_BITS;
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
+ s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
+ break;
+ case IDE_DMA_WRITE:
+ s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
+ break;
+ case IDE_DMA_TRIM:
+ s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
+ &s->sg, offset, BDRV_SECTOR_SIZE,
+ ide_issue_trim, s, ide_dma_cb, s,
+ DMA_DIRECTION_TO_DEVICE);
+ break;
+ default:
+ abort();
+ }
+ return;
+
+eot:
+ if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
+ ide_set_inactive(s, stay_active);
+}
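+
+/*
+ * Illustrative PRD sizing cases handled in ide_dma_cb() above:
+ *
+ *   - The guest's PRDs describe more bytes than the request needs
+ *     (io_buffer_size > nsector * 512): only nsector sectors are
+ *     transferred and stay_active is set, so the Active bit remains
+ *     set when the request completes.
+ *
+ *   - The PRDs describe fewer bytes than the next chunk needs
+ *     (prep_size < n * 512): the transfer ends early with the Active
+ *     bit reset and no interrupt raised.
+ */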
+
+static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
+{
+ s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
+ s->io_buffer_size = 0;
+ s->dma_cmd = dma_cmd;
+
+ switch (dma_cmd) {
+ case IDE_DMA_READ:
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+ break;
+ case IDE_DMA_WRITE:
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
+ break;
+ default:
+ break;
+ }
+
+ ide_start_dma(s, ide_dma_cb);
+}
+
+void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
+{
+ s->io_buffer_index = 0;
+ ide_set_retry(s);
+ if (s->bus->dma->ops->start_dma) {
+ s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
+ }
+}
+
+static void ide_sector_write(IDEState *s);
+
+static void ide_sector_write_timer_cb(void *opaque)
+{
+ IDEState *s = opaque;
+ ide_set_irq(s->bus);
+}
+
+static void ide_sector_write_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+ int n;
+
+ s->pio_aiocb = NULL;
+ s->status &= ~BUSY_STAT;
+
+ if (ret != 0) {
+ if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
+ return;
+ }
+ }
+
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+
+ n = s->nsector;
+ if (n > s->req_nb_sectors) {
+ n = s->req_nb_sectors;
+ }
+ s->nsector -= n;
+
+ ide_set_sector(s, ide_get_sector(s) + n);
+ if (s->nsector == 0) {
+ /* no more sectors to write */
+ ide_transfer_stop(s);
+ } else {
+ int n1 = s->nsector;
+ if (n1 > s->req_nb_sectors) {
+ n1 = s->req_nb_sectors;
+ }
+ ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
+ ide_sector_write);
+ }
+
+ if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
+ /* It seems there is a bug in the Windows 2000 installer HDD
+ IDE driver which fills the disk with empty logs when the
+ IDE write IRQ comes too early. This hack tries to correct
+           that at the expense of slower write performance. Use this
+ option _only_ to install Windows 2000. You must disable it
+ for normal use. */
+ timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ (NANOSECONDS_PER_SECOND / 1000));
+ } else {
+ ide_set_irq(s->bus);
+ }
+}
+
+static void ide_sector_write(IDEState *s)
+{
+ int64_t sector_num;
+ int n;
+
+ s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
+ sector_num = ide_get_sector(s);
+
+ n = s->nsector;
+ if (n > s->req_nb_sectors) {
+ n = s->req_nb_sectors;
+ }
+
+ trace_ide_sector_write(sector_num, n);
+
+ if (!ide_sect_range_ok(s, sector_num, n)) {
+ ide_rw_error(s);
+ block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
+ return;
+ }
+
+ qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
+
+ block_acct_start(blk_get_stats(s->blk), &s->acct,
+ n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
+ s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
+ &s->qiov, 0, ide_sector_write_cb, s);
+}
+
+static void ide_flush_cb(void *opaque, int ret)
+{
+ IDEState *s = opaque;
+
+ s->pio_aiocb = NULL;
+
+ if (ret < 0) {
+ /* XXX: What sector number to set here? */
+ if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
+ return;
+ }
+ }
+
+ if (s->blk) {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
+ s->status = READY_STAT | SEEK_STAT;
+ ide_cmd_done(s);
+ ide_set_irq(s->bus);
+}
+
+static void ide_flush_cache(IDEState *s)
+{
+ if (s->blk == NULL) {
+ ide_flush_cb(s, 0);
+ return;
+ }
+
+ s->status |= BUSY_STAT;
+ ide_set_retry(s);
+ block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
+ s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
+}
+
+static void ide_cfata_metadata_inquiry(IDEState *s)
+{
+ uint16_t *p;
+ uint32_t spd;
+
+ p = (uint16_t *) s->io_buffer;
+ memset(p, 0, 0x200);
+ spd = ((s->mdata_size - 1) >> 9) + 1;
+
+ put_le16(p + 0, 0x0001); /* Data format revision */
+ put_le16(p + 1, 0x0000); /* Media property: silicon */
+ put_le16(p + 2, s->media_changed); /* Media status */
+ put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
+ put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
+ put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
+ put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
+}
+
+static void ide_cfata_metadata_read(IDEState *s)
+{
+ uint16_t *p;
+
+ if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
+ s->status = ERR_STAT;
+ s->error = ABRT_ERR;
+ return;
+ }
+
+ p = (uint16_t *) s->io_buffer;
+ memset(p, 0, 0x200);
+
+ put_le16(p + 0, s->media_changed); /* Media status */
+ memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
+ MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
+ s->nsector << 9), 0x200 - 2));
+}
+
+static void ide_cfata_metadata_write(IDEState *s)
+{
+ if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
+ s->status = ERR_STAT;
+ s->error = ABRT_ERR;
+ return;
+ }
+
+ s->media_changed = 0;
+
+ memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
+ s->io_buffer + 2,
+ MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
+ s->nsector << 9), 0x200 - 2));
+}
+
+/* called when the inserted state of the media has changed */
+static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
+{
+ IDEState *s = opaque;
+ uint64_t nb_sectors;
+
+ s->tray_open = !load;
+ blk_get_geometry(s->blk, &nb_sectors);
+ s->nb_sectors = nb_sectors;
+
+ /*
+ * First indicate to the guest that a CD has been removed. That's
+ * done on the next command the guest sends us.
+ *
+ * Then we set UNIT_ATTENTION, by which the guest will
+ * detect a new CD in the drive. See ide_atapi_cmd() for details.
+ */
+ s->cdrom_changed = 1;
+ s->events.new_media = true;
+ s->events.eject_request = false;
+ ide_set_irq(s->bus);
+}
+
+static void ide_cd_eject_request_cb(void *opaque, bool force)
+{
+ IDEState *s = opaque;
+
+ s->events.eject_request = true;
+ if (force) {
+ s->tray_locked = false;
+ }
+ ide_set_irq(s->bus);
+}
+
+static void ide_cmd_lba48_transform(IDEState *s, int lba48)
+{
+ s->lba48 = lba48;
+
+    /* Handle the 'magic' 0 nsector count conversion here. To avoid
+     * fiddling with the rest of the read logic, we just store the
+     * full sector count in ->nsector and ignore ->hob_nsector from
+     * now on.
+     */
+    if (!s->lba48) {
+        if (!s->nsector) {
+            s->nsector = 256;
+        }
+    } else {
+        if (!s->nsector && !s->hob_nsector) {
+            s->nsector = 65536;
+        } else {
+            int lo = s->nsector;
+            int hi = s->hob_nsector;
+
+            s->nsector = (hi << 8) | lo;
+        }
+    }
+}
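+
+/*
+ * Worked examples for the sector count conversion above (illustrative):
+ *
+ *   lba48 == 0: nsector 0x00                   -> 256 sectors (128 KiB)
+ *   lba48 == 1: nsector 0x00, hob_nsector 0x00 -> 65536 sectors (32 MiB)
+ *   lba48 == 1: nsector 0x34, hob_nsector 0x12 -> 0x1234 sectors
+ */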
+
+static void ide_clear_hob(IDEBus *bus)
+{
+ /* any write clears HOB high bit of device control register */
+ bus->cmd &= ~(IDE_CTRL_HOB);
+}
+
+/* IOport [W]rite [R]egisters */
+enum ATA_IOPORT_WR {
+ ATA_IOPORT_WR_DATA = 0,
+ ATA_IOPORT_WR_FEATURES = 1,
+ ATA_IOPORT_WR_SECTOR_COUNT = 2,
+ ATA_IOPORT_WR_SECTOR_NUMBER = 3,
+ ATA_IOPORT_WR_CYLINDER_LOW = 4,
+ ATA_IOPORT_WR_CYLINDER_HIGH = 5,
+ ATA_IOPORT_WR_DEVICE_HEAD = 6,
+ ATA_IOPORT_WR_COMMAND = 7,
+ ATA_IOPORT_WR_NUM_REGISTERS,
+};
+
+const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
+ [ATA_IOPORT_WR_DATA] = "Data",
+ [ATA_IOPORT_WR_FEATURES] = "Features",
+ [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
+ [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
+ [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
+ [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
+ [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
+ [ATA_IOPORT_WR_COMMAND] = "Command"
+};
+
+void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ int reg_num = addr & 7;
+
+ trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
+
+ /* ignore writes to command block while busy with previous command */
+ if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
+ return;
+ }
+
+ /* NOTE: Device0 and Device1 both receive incoming register writes.
+ * (They're on the same bus! They have to!) */
+
+ switch (reg_num) {
+ case 0:
+ break;
+ case ATA_IOPORT_WR_FEATURES:
+ ide_clear_hob(bus);
+ bus->ifs[0].hob_feature = bus->ifs[0].feature;
+ bus->ifs[1].hob_feature = bus->ifs[1].feature;
+ bus->ifs[0].feature = val;
+ bus->ifs[1].feature = val;
+ break;
+ case ATA_IOPORT_WR_SECTOR_COUNT:
+ ide_clear_hob(bus);
+ bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
+ bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
+ bus->ifs[0].nsector = val;
+ bus->ifs[1].nsector = val;
+ break;
+ case ATA_IOPORT_WR_SECTOR_NUMBER:
+ ide_clear_hob(bus);
+ bus->ifs[0].hob_sector = bus->ifs[0].sector;
+ bus->ifs[1].hob_sector = bus->ifs[1].sector;
+ bus->ifs[0].sector = val;
+ bus->ifs[1].sector = val;
+ break;
+ case ATA_IOPORT_WR_CYLINDER_LOW:
+ ide_clear_hob(bus);
+ bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
+ bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
+ bus->ifs[0].lcyl = val;
+ bus->ifs[1].lcyl = val;
+ break;
+ case ATA_IOPORT_WR_CYLINDER_HIGH:
+ ide_clear_hob(bus);
+ bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
+ bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
+ bus->ifs[0].hcyl = val;
+ bus->ifs[1].hcyl = val;
+ break;
+ case ATA_IOPORT_WR_DEVICE_HEAD:
+ ide_clear_hob(bus);
+ bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
+ bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
+ /* select drive */
+ bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
+ break;
+ default:
+ case ATA_IOPORT_WR_COMMAND:
+ ide_clear_hob(bus);
+ qemu_irq_lower(bus->irq);
+ ide_exec_cmd(bus, val);
+ break;
+ }
+}
+
+static void ide_reset(IDEState *s)
+{
+ trace_ide_reset(s);
+
+ if (s->pio_aiocb) {
+ blk_aio_cancel(s->pio_aiocb);
+ s->pio_aiocb = NULL;
+ }
+
+    if (s->drive_kind == IDE_CFATA) {
+        s->mult_sectors = 0;
+    } else {
+        s->mult_sectors = MAX_MULT_SECTORS;
+    }
+ /* ide regs */
+ s->feature = 0;
+ s->error = 0;
+ s->nsector = 0;
+ s->sector = 0;
+ s->lcyl = 0;
+ s->hcyl = 0;
+
+ /* lba48 */
+ s->hob_feature = 0;
+ s->hob_sector = 0;
+ s->hob_nsector = 0;
+ s->hob_lcyl = 0;
+ s->hob_hcyl = 0;
+
+ s->select = (ATA_DEV_ALWAYS_ON);
+ s->status = READY_STAT | SEEK_STAT;
+
+ s->lba48 = 0;
+
+ /* ATAPI specific */
+ s->sense_key = 0;
+ s->asc = 0;
+ s->cdrom_changed = 0;
+ s->packet_transfer_size = 0;
+ s->elementary_transfer_size = 0;
+ s->io_buffer_index = 0;
+ s->cd_sector_size = 0;
+ s->atapi_dma = 0;
+ s->tray_locked = 0;
+ s->tray_open = 0;
+ /* ATA DMA state */
+ s->io_buffer_size = 0;
+ s->req_nb_sectors = 0;
+
+ ide_set_signature(s);
+ /* init the transfer handler so that 0xffff is returned on data
+ accesses */
+ s->end_transfer_func = ide_dummy_transfer_stop;
+ ide_dummy_transfer_stop(s);
+ s->media_changed = 0;
+}
+
+static bool cmd_nop(IDEState *s, uint8_t cmd)
+{
+ return true;
+}
+
+static bool cmd_device_reset(IDEState *s, uint8_t cmd)
+{
+ /* Halt PIO (in the DRQ phase), then DMA */
+ ide_transfer_halt(s);
+ ide_cancel_dma_sync(s);
+
+ /* Reset any PIO commands, reset signature, etc */
+ ide_reset(s);
+
+ /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
+ * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
+ s->status = 0x00;
+
+ /* Do not overwrite status register */
+ return false;
+}
+
+static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
+{
+ switch (s->feature) {
+ case DSM_TRIM:
+ if (s->blk) {
+ ide_sector_start_dma(s, IDE_DMA_TRIM);
+ return false;
+ }
+ break;
+ }
+
+ ide_abort_command(s);
+ return true;
+}
+
+static bool cmd_identify(IDEState *s, uint8_t cmd)
+{
+ if (s->blk && s->drive_kind != IDE_CD) {
+ if (s->drive_kind != IDE_CFATA) {
+ ide_identify(s);
+ } else {
+ ide_cfata_identify(s);
+ }
+ s->status = READY_STAT | SEEK_STAT;
+ ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
+ ide_set_irq(s->bus);
+ return false;
+ } else {
+ if (s->drive_kind == IDE_CD) {
+ ide_set_signature(s);
+ }
+ ide_abort_command(s);
+ }
+
+ return true;
+}
+
+static bool cmd_verify(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_VERIFY_EXT);
+
+ /* do sector number check ? */
+ ide_cmd_lba48_transform(s, lba48);
+
+ return true;
+}
+
+static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
+{
+ if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
+ /* Disable Read and Write Multiple */
+ s->mult_sectors = 0;
+ } else if ((s->nsector & 0xff) != 0 &&
+ ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
+ (s->nsector & (s->nsector - 1)) != 0)) {
+ ide_abort_command(s);
+ } else {
+ s->mult_sectors = s->nsector & 0xff;
+ }
+
+ return true;
+}
+
+static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_MULTREAD_EXT);
+
+ if (!s->blk || !s->mult_sectors) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+ s->req_nb_sectors = s->mult_sectors;
+ ide_sector_read(s);
+ return false;
+}
+
+static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_MULTWRITE_EXT);
+ int n;
+
+ if (!s->blk || !s->mult_sectors) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+
+ s->req_nb_sectors = s->mult_sectors;
+ n = MIN(s->nsector, s->req_nb_sectors);
+
+ s->status = SEEK_STAT | READY_STAT;
+ ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
+
+ s->media_changed = 1;
+
+ return false;
+}
+
+static bool cmd_read_pio(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_READ_EXT);
+
+ if (s->drive_kind == IDE_CD) {
+ ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
+ ide_abort_command(s);
+ return true;
+ }
+
+ if (!s->blk) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+ s->req_nb_sectors = 1;
+ ide_sector_read(s);
+
+ return false;
+}
+
+static bool cmd_write_pio(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_WRITE_EXT);
+
+ if (!s->blk) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+
+ s->req_nb_sectors = 1;
+ s->status = SEEK_STAT | READY_STAT;
+ ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
+
+ s->media_changed = 1;
+
+ return false;
+}
+
+static bool cmd_read_dma(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_READDMA_EXT);
+
+ if (!s->blk) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+ ide_sector_start_dma(s, IDE_DMA_READ);
+
+ return false;
+}
+
+static bool cmd_write_dma(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_WRITEDMA_EXT);
+
+ if (!s->blk) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+ ide_sector_start_dma(s, IDE_DMA_WRITE);
+
+ s->media_changed = 1;
+
+ return false;
+}
+
+static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
+{
+ ide_flush_cache(s);
+ return false;
+}
+
+static bool cmd_seek(IDEState *s, uint8_t cmd)
+{
+ /* XXX: Check that seek is within bounds */
+ return true;
+}
+
+static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
+{
+ bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
+
+ /* Refuse if no sectors are addressable (e.g. medium not inserted) */
+ if (s->nb_sectors == 0) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_cmd_lba48_transform(s, lba48);
+ ide_set_sector(s, s->nb_sectors - 1);
+
+ return true;
+}
+
+static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
+{
+ s->nsector = 0xff; /* device active or idle */
+ return true;
+}
+
+static bool cmd_set_features(IDEState *s, uint8_t cmd)
+{
+ uint16_t *identify_data;
+
+ if (!s->blk) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ /* XXX: valid for CDROM ? */
+ switch (s->feature) {
+ case 0x02: /* write cache enable */
+ blk_set_enable_write_cache(s->blk, true);
+ identify_data = (uint16_t *)s->identify_data;
+ put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
+ return true;
+ case 0x82: /* write cache disable */
+ blk_set_enable_write_cache(s->blk, false);
+ identify_data = (uint16_t *)s->identify_data;
+ put_le16(identify_data + 85, (1 << 14) | 1);
+ ide_flush_cache(s);
+ return false;
+ case 0xcc: /* reverting to power-on defaults enable */
+ case 0x66: /* reverting to power-on defaults disable */
+ case 0xaa: /* read look-ahead enable */
+ case 0x55: /* read look-ahead disable */
+ case 0x05: /* set advanced power management mode */
+ case 0x85: /* disable advanced power management mode */
+ case 0x69: /* NOP */
+ case 0x67: /* NOP */
+ case 0x96: /* NOP */
+ case 0x9a: /* NOP */
+ case 0x42: /* enable Automatic Acoustic Mode */
+ case 0xc2: /* disable Automatic Acoustic Mode */
+ return true;
+ case 0x03: /* set transfer mode */
+ {
+ uint8_t val = s->nsector & 0x07;
+ identify_data = (uint16_t *)s->identify_data;
+
+ switch (s->nsector >> 3) {
+ case 0x00: /* pio default */
+ case 0x01: /* pio mode */
+ put_le16(identify_data + 62, 0x07);
+ put_le16(identify_data + 63, 0x07);
+ put_le16(identify_data + 88, 0x3f);
+ break;
+        case 0x02: /* single word dma mode */
+ put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
+ put_le16(identify_data + 63, 0x07);
+ put_le16(identify_data + 88, 0x3f);
+ break;
+ case 0x04: /* mdma mode */
+ put_le16(identify_data + 62, 0x07);
+ put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
+ put_le16(identify_data + 88, 0x3f);
+ break;
+ case 0x08: /* udma mode */
+ put_le16(identify_data + 62, 0x07);
+ put_le16(identify_data + 63, 0x07);
+ put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
+ break;
+ default:
+ goto abort_cmd;
+ }
+ return true;
+ }
+ }
+
+abort_cmd:
+ ide_abort_command(s);
+ return true;
+}
+
+
+/*** ATAPI commands ***/
+
+static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
+{
+ ide_atapi_identify(s);
+ s->status = READY_STAT | SEEK_STAT;
+ ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
+ ide_set_irq(s->bus);
+ return false;
+}
+
+static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
+{
+ ide_set_signature(s);
+
+ if (s->drive_kind == IDE_CD) {
+ s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
+ * devices to return a clear status register
+ * with READY_STAT *not* set. */
+ s->error = 0x01;
+ } else {
+ s->status = READY_STAT | SEEK_STAT;
+ /* The bits of the error register are not as usual for this command!
+ * They are part of the regular output (this is why ERR_STAT isn't set)
+ * Device 0 passed, Device 1 passed or not present. */
+ s->error = 0x01;
+ ide_set_irq(s->bus);
+ }
+
+ return false;
+}
+
+static bool cmd_packet(IDEState *s, uint8_t cmd)
+{
+ /* overlapping commands not supported */
+ if (s->feature & 0x02) {
+ ide_abort_command(s);
+ return true;
+ }
+
+ s->status = READY_STAT | SEEK_STAT;
+ s->atapi_dma = s->feature & 1;
+ if (s->atapi_dma) {
+ s->dma_cmd = IDE_DMA_ATAPI;
+ }
+ s->nsector = 1;
+ ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
+ ide_atapi_cmd);
+ return false;
+}
+
+
+/*** CF-ATA commands ***/
+
+static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
+{
+ s->error = 0x09; /* miscellaneous error */
+ s->status = READY_STAT | SEEK_STAT;
+ ide_set_irq(s->bus);
+
+ return false;
+}
+
+static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
+{
+ /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
+ * required for Windows 8 to work with AHCI */
+
+ if (cmd == CFA_WEAR_LEVEL) {
+ s->nsector = 0;
+ }
+
+ if (cmd == CFA_ERASE_SECTORS) {
+ s->media_changed = 1;
+ }
+
+ return true;
+}
+
+static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
+{
+ s->status = READY_STAT | SEEK_STAT;
+
+ memset(s->io_buffer, 0, 0x200);
+ s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
+ s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
+ s->io_buffer[0x02] = s->select; /* Head */
+ s->io_buffer[0x03] = s->sector; /* Sector */
+ s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
+ s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
+ s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
+ s->io_buffer[0x13] = 0x00; /* Erase flag */
+ s->io_buffer[0x18] = 0x00; /* Hot count */
+ s->io_buffer[0x19] = 0x00; /* Hot count */
+ s->io_buffer[0x1a] = 0x01; /* Hot count */
+
+ ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
+ ide_set_irq(s->bus);
+
+ return false;
+}
+
+static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
+{
+ switch (s->feature) {
+ case 0x02: /* Inquiry Metadata Storage */
+ ide_cfata_metadata_inquiry(s);
+ break;
+ case 0x03: /* Read Metadata Storage */
+ ide_cfata_metadata_read(s);
+ break;
+ case 0x04: /* Write Metadata Storage */
+ ide_cfata_metadata_write(s);
+ break;
+ default:
+ ide_abort_command(s);
+ return true;
+ }
+
+ ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
+ s->status = 0x00; /* NOTE: READY is _not_ set */
+ ide_set_irq(s->bus);
+
+ return false;
+}
+
+static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
+{
+ switch (s->feature) {
+ case 0x01: /* sense temperature in device */
+ s->nsector = 0x50; /* +20 C */
+ break;
+ default:
+ ide_abort_command(s);
+ return true;
+ }
+
+ return true;
+}
+
+
+/*** SMART commands ***/
+
+static bool cmd_smart(IDEState *s, uint8_t cmd)
+{
+ int n;
+
+ if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
+ goto abort_cmd;
+ }
+
+ if (!s->smart_enabled && s->feature != SMART_ENABLE) {
+ goto abort_cmd;
+ }
+
+ switch (s->feature) {
+ case SMART_DISABLE:
+ s->smart_enabled = 0;
+ return true;
+
+ case SMART_ENABLE:
+ s->smart_enabled = 1;
+ return true;
+
+ case SMART_ATTR_AUTOSAVE:
+ switch (s->sector) {
+ case 0x00:
+ s->smart_autosave = 0;
+ break;
+ case 0xf1:
+ s->smart_autosave = 1;
+ break;
+ default:
+ goto abort_cmd;
+ }
+ return true;
+
+ case SMART_STATUS:
+ if (!s->smart_errors) {
+ s->hcyl = 0xc2;
+ s->lcyl = 0x4f;
+ } else {
+ s->hcyl = 0x2c;
+ s->lcyl = 0xf4;
+ }
+ return true;
+
+ case SMART_READ_THRESH:
+ memset(s->io_buffer, 0, 0x200);
+ s->io_buffer[0] = 0x01; /* smart struct version */
+
+ for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
+ s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
+ s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
+ }
+
+ /* checksum */
+ for (n = 0; n < 511; n++) {
+ s->io_buffer[511] += s->io_buffer[n];
+ }
+ s->io_buffer[511] = 0x100 - s->io_buffer[511];
+
+ s->status = READY_STAT | SEEK_STAT;
+ ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
+ ide_set_irq(s->bus);
+ return false;
+
+ case SMART_READ_DATA:
+ memset(s->io_buffer, 0, 0x200);
+ s->io_buffer[0] = 0x01; /* smart struct version */
+
+ for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
+ int i;
+ for (i = 0; i < 11; i++) {
+ s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
+ }
+ }
+
+ s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
+ if (s->smart_selftest_count == 0) {
+ s->io_buffer[363] = 0;
+ } else {
+ s->io_buffer[363] =
+ s->smart_selftest_data[3 +
+ (s->smart_selftest_count - 1) *
+ 24];
+ }
+ s->io_buffer[364] = 0x20;
+ s->io_buffer[365] = 0x01;
+        /* offline data collection capacity: execute + self-test */
+ s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
+ s->io_buffer[368] = 0x03; /* smart capability (1) */
+ s->io_buffer[369] = 0x00; /* smart capability (2) */
+ s->io_buffer[370] = 0x01; /* error logging supported */
+ s->io_buffer[372] = 0x02; /* minutes for poll short test */
+ s->io_buffer[373] = 0x36; /* minutes for poll ext test */
+ s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
+
+ for (n = 0; n < 511; n++) {
+ s->io_buffer[511] += s->io_buffer[n];
+ }
+ s->io_buffer[511] = 0x100 - s->io_buffer[511];
+
+ s->status = READY_STAT | SEEK_STAT;
+ ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
+ ide_set_irq(s->bus);
+ return false;
+
+ case SMART_READ_LOG:
+ switch (s->sector) {
+ case 0x01: /* summary smart error log */
+ memset(s->io_buffer, 0, 0x200);
+ s->io_buffer[0] = 0x01;
+ s->io_buffer[1] = 0x00; /* no error entries */
+ s->io_buffer[452] = s->smart_errors & 0xff;
+ s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
+
+ for (n = 0; n < 511; n++) {
+ s->io_buffer[511] += s->io_buffer[n];
+ }
+ s->io_buffer[511] = 0x100 - s->io_buffer[511];
+ break;
+ case 0x06: /* smart self test log */
+ memset(s->io_buffer, 0, 0x200);
+ s->io_buffer[0] = 0x01;
+ if (s->smart_selftest_count == 0) {
+ s->io_buffer[508] = 0;
+ } else {
+ s->io_buffer[508] = s->smart_selftest_count;
+ for (n = 2; n < 506; n++) {
+ s->io_buffer[n] = s->smart_selftest_data[n];
+ }
+ }
+
+ for (n = 0; n < 511; n++) {
+ s->io_buffer[511] += s->io_buffer[n];
+ }
+ s->io_buffer[511] = 0x100 - s->io_buffer[511];
+ break;
+ default:
+ goto abort_cmd;
+ }
+ s->status = READY_STAT | SEEK_STAT;
+ ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
+ ide_set_irq(s->bus);
+ return false;
+
+ case SMART_EXECUTE_OFFLINE:
+ switch (s->sector) {
+ case 0: /* off-line routine */
+ case 1: /* short self test */
+ case 2: /* extended self test */
+ s->smart_selftest_count++;
+ if (s->smart_selftest_count > 21) {
+ s->smart_selftest_count = 1;
+ }
+ n = 2 + (s->smart_selftest_count - 1) * 24;
+ s->smart_selftest_data[n] = s->sector;
+ s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
+ s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
+ s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
+ break;
+ default:
+ goto abort_cmd;
+ }
+ return true;
+ }
+
+abort_cmd:
+ ide_abort_command(s);
+ return true;
+}
+
+#define HD_OK (1u << IDE_HD)
+#define CD_OK (1u << IDE_CD)
+#define CFA_OK (1u << IDE_CFATA)
+#define HD_CFA_OK (HD_OK | CFA_OK)
+#define ALL_OK (HD_OK | CD_OK | CFA_OK)
+
+/* Set the Disk Seek Completed status bit during completion */
+#define SET_DSC (1u << 8)
+
+/* See ACS-2 T13/2015-D Table B.2 Command codes */
+static const struct {
+ /* Returns true if the completion code should be run */
+ bool (*handler)(IDEState *s, uint8_t cmd);
+ int flags;
+} ide_cmd_table[0x100] = {
+ /* NOP not implemented, mandatory for CD */
+ [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
+ [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
+ [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
+ [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
+ [WIN_READ] = { cmd_read_pio, ALL_OK },
+ [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
+ [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
+ [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
+ [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
+ [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
+ [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
+ [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
+ [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
+ [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
+ [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
+ [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
+ [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
+ [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
+ [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
+ [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
+ [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
+ [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
+ [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
+ [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
+ [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
+ [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
+ [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
+ [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
+ [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
+ [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
+ [WIN_PACKETCMD] = { cmd_packet, CD_OK },
+ [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
+ [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
+ [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
+ [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
+ [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
+ [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
+ [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
+ [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
+ [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
+ [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
+ [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
+ [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
+ [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
+ [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
+ [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
+ [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
+ [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
+ [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
+ [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
+ [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
+ [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
+ [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
+ [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
+ [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
+ [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
+};
+
+static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
+{
+ return cmd < ARRAY_SIZE(ide_cmd_table)
+ && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
+}
+
+void ide_exec_cmd(IDEBus *bus, uint32_t val)
+{
+ IDEState *s;
+ bool complete;
+
+ s = idebus_active_if(bus);
+ trace_ide_exec_cmd(bus, s, val);
+
+ /* ignore commands to a non-existent slave */
+ if (s != bus->ifs && !s->blk) {
+ return;
+ }
+
+ /* Only RESET is allowed while BSY and/or DRQ are set,
+ * and only to ATAPI devices. */
+ if (s->status & (BUSY_STAT|DRQ_STAT)) {
+ if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
+ return;
+ }
+ }
+
+ if (!ide_cmd_permitted(s, val)) {
+ ide_abort_command(s);
+ ide_set_irq(s->bus);
+ return;
+ }
+
+ s->status = READY_STAT | BUSY_STAT;
+ s->error = 0;
+ s->io_buffer_offset = 0;
+
+ complete = ide_cmd_table[val].handler(s, val);
+ if (complete) {
+ s->status &= ~BUSY_STAT;
+ assert(!!s->error == !!(s->status & ERR_STAT));
+
+ if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
+ s->status |= SEEK_STAT;
+ }
+
+ ide_cmd_done(s);
+ ide_set_irq(s->bus);
+ }
+}
+
+/* IOport [R]ead [R]egisters */
+enum ATA_IOPORT_RR {
+ ATA_IOPORT_RR_DATA = 0,
+ ATA_IOPORT_RR_ERROR = 1,
+ ATA_IOPORT_RR_SECTOR_COUNT = 2,
+ ATA_IOPORT_RR_SECTOR_NUMBER = 3,
+ ATA_IOPORT_RR_CYLINDER_LOW = 4,
+ ATA_IOPORT_RR_CYLINDER_HIGH = 5,
+ ATA_IOPORT_RR_DEVICE_HEAD = 6,
+ ATA_IOPORT_RR_STATUS = 7,
+ ATA_IOPORT_RR_NUM_REGISTERS,
+};
+
+const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
+ [ATA_IOPORT_RR_DATA] = "Data",
+ [ATA_IOPORT_RR_ERROR] = "Error",
+ [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
+ [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
+ [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
+ [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
+ [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
+ [ATA_IOPORT_RR_STATUS] = "Status"
+};
+
+uint32_t ide_ioport_read(void *opaque, uint32_t addr)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ uint32_t reg_num;
+ int ret, hob;
+
+ reg_num = addr & 7;
+ hob = bus->cmd & (IDE_CTRL_HOB);
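+ /* With IDE_CTRL_HOB set in the control register, the LBA48 registers
+ * read back their previously latched high-order bytes (hob_*) instead
+ * of the current values. */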
+ switch (reg_num) {
+ case ATA_IOPORT_RR_DATA:
+ ret = 0xff;
+ break;
+ case ATA_IOPORT_RR_ERROR:
+ if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
+ (s != bus->ifs && !s->blk)) {
+ ret = 0;
+ } else if (!hob) {
+ ret = s->error;
+ } else {
+ ret = s->hob_feature;
+ }
+ break;
+ case ATA_IOPORT_RR_SECTOR_COUNT:
+ if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
+ ret = 0;
+ } else if (!hob) {
+ ret = s->nsector & 0xff;
+ } else {
+ ret = s->hob_nsector;
+ }
+ break;
+ case ATA_IOPORT_RR_SECTOR_NUMBER:
+ if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
+ ret = 0;
+ } else if (!hob) {
+ ret = s->sector;
+ } else {
+ ret = s->hob_sector;
+ }
+ break;
+ case ATA_IOPORT_RR_CYLINDER_LOW:
+ if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
+ ret = 0;
+ } else if (!hob) {
+ ret = s->lcyl;
+ } else {
+ ret = s->hob_lcyl;
+ }
+ break;
+ case ATA_IOPORT_RR_CYLINDER_HIGH:
+ if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
+ ret = 0;
+ } else if (!hob) {
+ ret = s->hcyl;
+ } else {
+ ret = s->hob_hcyl;
+ }
+ break;
+ case ATA_IOPORT_RR_DEVICE_HEAD:
+ if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
+ ret = 0;
+ } else {
+ ret = s->select;
+ }
+ break;
+ default:
+ case ATA_IOPORT_RR_STATUS:
+ if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
+ (s != bus->ifs && !s->blk)) {
+ ret = 0;
+ } else {
+ ret = s->status;
+ }
+ qemu_irq_lower(bus->irq);
+ break;
+ }
+
+ trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
+ return ret;
+}
+
+uint32_t ide_status_read(void *opaque, uint32_t addr)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ int ret;
+
+ if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
+ (s != bus->ifs && !s->blk)) {
+ ret = 0;
+ } else {
+ ret = s->status;
+ }
+
+ trace_ide_status_read(addr, ret, bus, s);
+ return ret;
+}
+
+static void ide_perform_srst(IDEState *s)
+{
+ s->status |= BUSY_STAT;
+
+ /* Halt PIO (Via register state); PIO BH remains scheduled. */
+ ide_transfer_halt(s);
+
+ /* Cancel DMA -- may drain block device and invoke callbacks */
+ ide_cancel_dma_sync(s);
+
+ /* Cancel PIO callback, reset registers/signature, etc */
+ ide_reset(s);
+
+ /* perform diagnostic */
+ cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
+}
+
+static void ide_bus_perform_srst(void *opaque)
+{
+ IDEBus *bus = opaque;
+ IDEState *s;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ s = &bus->ifs[i];
+ ide_perform_srst(s);
+ }
+
+ bus->cmd &= ~IDE_CTRL_RESET;
+}
+
+void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
+{
+ IDEBus *bus = opaque;
+ IDEState *s;
+ int i;
+
+ trace_ide_ctrl_write(addr, val, bus);
+
+ /* Device0 and Device1 each have their own control register,
+ * but QEMU models it as just one register in the controller. */
+ if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
+ for (i = 0; i < 2; i++) {
+ s = &bus->ifs[i];
+ s->status |= BUSY_STAT;
+ }
+ replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
+ ide_bus_perform_srst, bus);
+ }
+
+ bus->cmd = val;
+}
+
+/*
+ * Returns true if the running PIO transfer is a PIO out (i.e. data is
+ * transferred from the device to the guest), false if it's a PIO in
+ */
+static bool ide_is_pio_out(IDEState *s)
+{
+ if (s->end_transfer_func == ide_sector_write ||
+ s->end_transfer_func == ide_atapi_cmd) {
+ return false;
+ } else if (s->end_transfer_func == ide_sector_read ||
+ s->end_transfer_func == ide_transfer_stop ||
+ s->end_transfer_func == ide_atapi_cmd_reply_end ||
+ s->end_transfer_func == ide_dummy_transfer_stop) {
+ return true;
+ }
+
+ abort();
+}
+
+void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ uint8_t *p;
+
+ trace_ide_data_writew(addr, val, bus, s);
+
+ /* PIO data access allowed only when DRQ bit is set. The result of a write
+ * during PIO out is indeterminate, just ignore it. */
+ if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
+ return;
+ }
+
+ p = s->data_ptr;
+ if (p + 2 > s->data_end) {
+ return;
+ }
+
+ *(uint16_t *)p = le16_to_cpu(val);
+ p += 2;
+ s->data_ptr = p;
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
+ s->end_transfer_func(s);
+ }
+}
+
+uint32_t ide_data_readw(void *opaque, uint32_t addr)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ uint8_t *p;
+ int ret;
+
+ /* PIO data access allowed only when DRQ bit is set. The result of a read
+ * during PIO in is indeterminate, return 0 and don't move forward. */
+ if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
+ return 0;
+ }
+
+ p = s->data_ptr;
+ if (p + 2 > s->data_end) {
+ return 0;
+ }
+
+ ret = cpu_to_le16(*(uint16_t *)p);
+ p += 2;
+ s->data_ptr = p;
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
+ s->end_transfer_func(s);
+ }
+
+ trace_ide_data_readw(addr, ret, bus, s);
+ return ret;
+}
+
+void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ uint8_t *p;
+
+ trace_ide_data_writel(addr, val, bus, s);
+
+ /* PIO data access allowed only when DRQ bit is set. The result of a write
+ * during PIO out is indeterminate, just ignore it. */
+ if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
+ return;
+ }
+
+ p = s->data_ptr;
+ if (p + 4 > s->data_end) {
+ return;
+ }
+
+ *(uint32_t *)p = le32_to_cpu(val);
+ p += 4;
+ s->data_ptr = p;
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
+ s->end_transfer_func(s);
+ }
+}
+
+uint32_t ide_data_readl(void *opaque, uint32_t addr)
+{
+ IDEBus *bus = opaque;
+ IDEState *s = idebus_active_if(bus);
+ uint8_t *p;
+ int ret;
+
+ /* PIO data access allowed only when DRQ bit is set. The result of a read
+ * during PIO in is indeterminate, return 0 and don't move forward. */
+ if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
+ ret = 0;
+ goto out;
+ }
+
+ p = s->data_ptr;
+ if (p + 4 > s->data_end) {
+ return 0;
+ }
+
+ ret = cpu_to_le32(*(uint32_t *)p);
+ p += 4;
+ s->data_ptr = p;
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
+ s->end_transfer_func(s);
+ }
+
+out:
+ trace_ide_data_readl(addr, ret, bus, s);
+ return ret;
+}
+
+static void ide_dummy_transfer_stop(IDEState *s)
+{
+ s->data_ptr = s->io_buffer;
+ s->data_end = s->io_buffer;
+ s->io_buffer[0] = 0xff;
+ s->io_buffer[1] = 0xff;
+ s->io_buffer[2] = 0xff;
+ s->io_buffer[3] = 0xff;
+}
+
+void ide_bus_reset(IDEBus *bus)
+{
+ bus->unit = 0;
+ bus->cmd = 0;
+ ide_reset(&bus->ifs[0]);
+ ide_reset(&bus->ifs[1]);
+ ide_clear_hob(bus);
+
+ /* pending async DMA */
+ if (bus->dma->aiocb) {
+ trace_ide_bus_reset_aio();
+ blk_aio_cancel(bus->dma->aiocb);
+ bus->dma->aiocb = NULL;
+ }
+
+ /* reset dma provider too */
+ if (bus->dma->ops->reset) {
+ bus->dma->ops->reset(bus->dma);
+ }
+}
+
+static bool ide_cd_is_tray_open(void *opaque)
+{
+ return ((IDEState *)opaque)->tray_open;
+}
+
+static bool ide_cd_is_medium_locked(void *opaque)
+{
+ return ((IDEState *)opaque)->tray_locked;
+}
+
+static void ide_resize_cb(void *opaque)
+{
+ IDEState *s = opaque;
+ uint64_t nb_sectors;
+
+ if (!s->identify_set) {
+ return;
+ }
+
+ blk_get_geometry(s->blk, &nb_sectors);
+ s->nb_sectors = nb_sectors;
+
+ /* Update the identify data buffer. */
+ if (s->drive_kind == IDE_CFATA) {
+ ide_cfata_identify_size(s);
+ } else {
+ /* IDE_CD uses a different set of callbacks entirely. */
+ assert(s->drive_kind != IDE_CD);
+ ide_identify_size(s);
+ }
+}
+
+static const BlockDevOps ide_cd_block_ops = {
+ .change_media_cb = ide_cd_change_cb,
+ .eject_request_cb = ide_cd_eject_request_cb,
+ .is_tray_open = ide_cd_is_tray_open,
+ .is_medium_locked = ide_cd_is_medium_locked,
+};
+
+static const BlockDevOps ide_hd_block_ops = {
+ .resize_cb = ide_resize_cb,
+};
+
+int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
+ const char *version, const char *serial, const char *model,
+ uint64_t wwn,
+ uint32_t cylinders, uint32_t heads, uint32_t secs,
+ int chs_trans, Error **errp)
+{
+ uint64_t nb_sectors;
+
+ s->blk = blk;
+ s->drive_kind = kind;
+
+ blk_get_geometry(blk, &nb_sectors);
+ s->cylinders = cylinders;
+ s->heads = heads;
+ s->sectors = secs;
+ s->chs_trans = chs_trans;
+ s->nb_sectors = nb_sectors;
+ s->wwn = wwn;
+ /* The SMART values should be preserved across power cycles
+ but they aren't. */
+ s->smart_enabled = 1;
+ s->smart_autosave = 1;
+ s->smart_errors = 0;
+ s->smart_selftest_count = 0;
+ if (kind == IDE_CD) {
+ blk_set_dev_ops(blk, &ide_cd_block_ops, s);
+ blk_set_guest_block_size(blk, 2048);
+ } else {
+ if (!blk_is_inserted(s->blk)) {
+ error_setg(errp, "Device needs media, but drive is empty");
+ return -1;
+ }
+ if (!blk_is_writable(blk)) {
+ error_setg(errp, "Can't use a read-only drive");
+ return -1;
+ }
+ blk_set_dev_ops(blk, &ide_hd_block_ops, s);
+ }
+ if (serial) {
+ pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
+ } else {
+ snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
+ "QM%05d", s->drive_serial);
+ }
+ if (model) {
+ pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
+ } else {
+ switch (kind) {
+ case IDE_CD:
+ strcpy(s->drive_model_str, "QEMU DVD-ROM");
+ break;
+ case IDE_CFATA:
+ strcpy(s->drive_model_str, "QEMU MICRODRIVE");
+ break;
+ default:
+ strcpy(s->drive_model_str, "QEMU HARDDISK");
+ break;
+ }
+ }
+
+ if (version) {
+ pstrcpy(s->version, sizeof(s->version), version);
+ } else {
+ pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
+ }
+
+ ide_reset(s);
+ blk_iostatus_enable(blk);
+ return 0;
+}
+
+static void ide_init1(IDEBus *bus, int unit)
+{
+ static int drive_serial = 1;
+ IDEState *s = &bus->ifs[unit];
+
+ s->bus = bus;
+ s->unit = unit;
+ s->drive_serial = drive_serial++;
+ /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
+ s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
+ s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
+ memset(s->io_buffer, 0, s->io_buffer_total_len);
+
+ s->smart_selftest_data = blk_blockalign(s->blk, 512);
+ memset(s->smart_selftest_data, 0, 512);
+
+ s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ ide_sector_write_timer_cb, s);
+}
+
+static int ide_nop_int(const IDEDMA *dma, bool is_write)
+{
+ return 0;
+}
+
+static void ide_nop(const IDEDMA *dma)
+{
+}
+
+static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
+{
+ return 0;
+}
+
+static const IDEDMAOps ide_dma_nop_ops = {
+ .prepare_buf = ide_nop_int32,
+ .restart_dma = ide_nop,
+ .rw_buf = ide_nop_int,
+};
+
+static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
+{
+ s->unit = s->bus->retry_unit;
+ ide_set_sector(s, s->bus->retry_sector_num);
+ s->nsector = s->bus->retry_nsector;
+ s->bus->dma->ops->restart_dma(s->bus->dma);
+ s->io_buffer_size = 0;
+ s->dma_cmd = dma_cmd;
+ ide_start_dma(s, ide_dma_cb);
+}
+
+static void ide_restart_bh(void *opaque)
+{
+ IDEBus *bus = opaque;
+ IDEState *s;
+ bool is_read;
+ int error_status;
+
+ qemu_bh_delete(bus->bh);
+ bus->bh = NULL;
+
+ error_status = bus->error_status;
+ if (bus->error_status == 0) {
+ return;
+ }
+
+ s = idebus_active_if(bus);
+ is_read = (bus->error_status & IDE_RETRY_READ) != 0;
+
+ /* The error status must be cleared before resubmitting the request: The
+ * request may fail again, and this case can only be distinguished if the
+ * called function can set a new error status. */
+ bus->error_status = 0;
+
+ /* The HBA has generically asked to be kicked on retry */
+ if (error_status & IDE_RETRY_HBA) {
+ if (s->bus->dma->ops->restart) {
+ s->bus->dma->ops->restart(s->bus->dma);
+ }
+ } else if (IS_IDE_RETRY_DMA(error_status)) {
+ if (error_status & IDE_RETRY_TRIM) {
+ ide_restart_dma(s, IDE_DMA_TRIM);
+ } else {
+ ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
+ }
+ } else if (IS_IDE_RETRY_PIO(error_status)) {
+ if (is_read) {
+ ide_sector_read(s);
+ } else {
+ ide_sector_write(s);
+ }
+ } else if (error_status & IDE_RETRY_FLUSH) {
+ ide_flush_cache(s);
+ } else if (IS_IDE_RETRY_ATAPI(error_status)) {
+ assert(s->end_transfer_func == ide_atapi_cmd);
+ ide_atapi_dma_restart(s);
+ } else {
+ abort();
+ }
+}
+
+static void ide_restart_cb(void *opaque, bool running, RunState state)
+{
+ IDEBus *bus = opaque;
+
+ if (!running) {
+ return;
+ }
+
+ if (!bus->bh) {
+ bus->bh = qemu_bh_new(ide_restart_bh, bus);
+ qemu_bh_schedule(bus->bh);
+ }
+}
+
+void ide_register_restart_cb(IDEBus *bus)
+{
+ if (bus->dma->ops->restart_dma) {
+ bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
+ }
+}
+
+static IDEDMA ide_dma_nop = {
+ .ops = &ide_dma_nop_ops,
+ .aiocb = NULL,
+};
+
+void ide_init2(IDEBus *bus, qemu_irq irq)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ide_init1(bus, i);
+ ide_reset(&bus->ifs[i]);
+ }
+ bus->irq = irq;
+ bus->dma = &ide_dma_nop;
+}
+
+void ide_exit(IDEState *s)
+{
+ timer_free(s->sector_write_timer);
+ qemu_vfree(s->smart_selftest_data);
+ qemu_vfree(s->io_buffer);
+}
+
+static bool is_identify_set(void *opaque, int version_id)
+{
+ IDEState *s = opaque;
+
+ return s->identify_set != 0;
+}
+
+static EndTransferFunc* transfer_end_table[] = {
+ ide_sector_read,
+ ide_sector_write,
+ ide_transfer_stop,
+ ide_atapi_cmd_reply_end,
+ ide_atapi_cmd,
+ ide_dummy_transfer_stop,
+};
+
+static int transfer_end_table_idx(EndTransferFunc *fn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++) {
+ if (transfer_end_table[i] == fn) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static int ide_drive_post_load(void *opaque, int version_id)
+{
+ IDEState *s = opaque;
+
+ if (s->blk && s->identify_set) {
+ blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
+ }
+ return 0;
+}
+
+static int ide_drive_pio_post_load(void *opaque, int version_id)
+{
+ IDEState *s = opaque;
+
+ if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
+ return -EINVAL;
+ }
+ s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
+ s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
+ s->data_end = s->data_ptr + s->cur_io_buffer_len;
+ s->atapi_dma = s->feature & 1; /* as per cmd_packet */
+
+ return 0;
+}
+
+static int ide_drive_pio_pre_save(void *opaque)
+{
+ IDEState *s = opaque;
+ int idx;
+
+ s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
+ s->cur_io_buffer_len = s->data_end - s->data_ptr;
+
+ idx = transfer_end_table_idx(s->end_transfer_func);
+ if (idx == -1) {
+ fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
+ __func__);
+ s->end_transfer_fn_idx = 2;
+ } else {
+ s->end_transfer_fn_idx = idx;
+ }
+
+ return 0;
+}
+
+static bool ide_drive_pio_state_needed(void *opaque)
+{
+ IDEState *s = opaque;
+
+ return ((s->status & DRQ_STAT) != 0)
+ || (s->bus->error_status & IDE_RETRY_PIO);
+}
+
+static bool ide_tray_state_needed(void *opaque)
+{
+ IDEState *s = opaque;
+
+ return s->tray_open || s->tray_locked;
+}
+
+static bool ide_atapi_gesn_needed(void *opaque)
+{
+ IDEState *s = opaque;
+
+ return s->events.new_media || s->events.eject_request;
+}
+
+static bool ide_error_needed(void *opaque)
+{
+ IDEBus *bus = opaque;
+
+ return (bus->error_status != 0);
+}
+
+/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
+static const VMStateDescription vmstate_ide_atapi_gesn_state = {
+ .name = "ide_drive/atapi/gesn_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ide_atapi_gesn_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(events.new_media, IDEState),
+ VMSTATE_BOOL(events.eject_request, IDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_ide_tray_state = {
+ .name = "ide_drive/tray_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ide_tray_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(tray_open, IDEState),
+ VMSTATE_BOOL(tray_locked, IDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_ide_drive_pio_state = {
+ .name = "ide_drive/pio_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = ide_drive_pio_pre_save,
+ .post_load = ide_drive_pio_post_load,
+ .needed = ide_drive_pio_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(req_nb_sectors, IDEState),
+ VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
+ vmstate_info_uint8, uint8_t),
+ VMSTATE_INT32(cur_io_buffer_offset, IDEState),
+ VMSTATE_INT32(cur_io_buffer_len, IDEState),
+ VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
+ VMSTATE_INT32(elementary_transfer_size, IDEState),
+ VMSTATE_INT32(packet_transfer_size, IDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_ide_drive = {
+ .name = "ide_drive",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .post_load = ide_drive_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(mult_sectors, IDEState),
+ VMSTATE_INT32(identify_set, IDEState),
+ VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
+ VMSTATE_UINT8(feature, IDEState),
+ VMSTATE_UINT8(error, IDEState),
+ VMSTATE_UINT32(nsector, IDEState),
+ VMSTATE_UINT8(sector, IDEState),
+ VMSTATE_UINT8(lcyl, IDEState),
+ VMSTATE_UINT8(hcyl, IDEState),
+ VMSTATE_UINT8(hob_feature, IDEState),
+ VMSTATE_UINT8(hob_sector, IDEState),
+ VMSTATE_UINT8(hob_nsector, IDEState),
+ VMSTATE_UINT8(hob_lcyl, IDEState),
+ VMSTATE_UINT8(hob_hcyl, IDEState),
+ VMSTATE_UINT8(select, IDEState),
+ VMSTATE_UINT8(status, IDEState),
+ VMSTATE_UINT8(lba48, IDEState),
+ VMSTATE_UINT8(sense_key, IDEState),
+ VMSTATE_UINT8(asc, IDEState),
+ VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ide_drive_pio_state,
+ &vmstate_ide_tray_state,
+ &vmstate_ide_atapi_gesn_state,
+ NULL
+ }
+};
+
+static const VMStateDescription vmstate_ide_error_status = {
+ .name = "ide_bus/error",
+ .version_id = 2,
+ .minimum_version_id = 1,
+ .needed = ide_error_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(error_status, IDEBus),
+ VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
+ VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
+ VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_ide_bus = {
+ .name = "ide_bus",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(cmd, IDEBus),
+ VMSTATE_UINT8(unit, IDEBus),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_ide_error_status,
+ NULL
+ }
+};
+
+void ide_drive_get(DriveInfo **hd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ hd[i] = drive_get_by_index(IF_IDE, i);
+ }
+}
diff --git a/hw/ide/ich.c b/hw/ide/ich.c
new file mode 100644
index 000000000..1007a51fc
--- /dev/null
+++ b/hw/ide/ich.c
@@ -0,0 +1,197 @@
+/*
+ * QEMU ICH Emulation
+ *
+ * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
+ * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * lspci dump of a ICH-9 real device
+ *
+ * 00:1f.2 SATA controller [0106]: Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA AHCI Controller [8086:2922] (rev 02) (prog-if 01 [AHCI 1.0])
+ * Subsystem: Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA AHCI Controller [8086:2922]
+ * Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx+
+ * Status: Cap+ 66MHz+ UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
+ * Latency: 0
+ * Interrupt: pin B routed to IRQ 222
+ * Region 0: I/O ports at d000 [size=8]
+ * Region 1: I/O ports at cc00 [size=4]
+ * Region 2: I/O ports at c880 [size=8]
+ * Region 3: I/O ports at c800 [size=4]
+ * Region 4: I/O ports at c480 [size=32]
+ * Region 5: Memory at febf9000 (32-bit, non-prefetchable) [size=2K]
+ * Capabilities: [80] Message Signalled Interrupts: Mask- 64bit- Count=1/16 Enable+
+ * Address: fee0f00c Data: 41d9
+ * Capabilities: [70] Power Management version 3
+ * Flags: PMEClk- DSI- D1- D2- AuxCurrent=0mA PME(D0-,D1-,D2-,D3hot+,D3cold-)
+ * Status: D0 PME-Enable- DSel=0 DScale=0 PME-
+ * Capabilities: [a8] SATA HBA <?>
+ * Capabilities: [b0] Vendor Specific Information <?>
+ * Kernel driver in use: ahci
+ * Kernel modules: ahci
+ * 00: 86 80 22 29 07 04 b0 02 02 01 06 01 00 00 00 00
+ * 10: 01 d0 00 00 01 cc 00 00 81 c8 00 00 01 c8 00 00
+ * 20: 81 c4 00 00 00 90 bf fe 00 00 00 00 86 80 22 29
+ * 30: 00 00 00 00 80 00 00 00 00 00 00 00 0f 02 00 00
+ * 40: 00 80 00 80 00 00 00 00 00 00 00 00 00 00 00 00
+ * 50: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 60: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 70: 01 a8 03 40 08 00 00 00 00 00 00 00 00 00 00 00
+ * 80: 05 70 09 00 0c f0 e0 fe d9 41 00 00 00 00 00 00
+ * 90: 40 00 0f 82 93 01 00 00 00 00 00 00 00 00 00 00
+ * a0: ac 00 00 00 0a 00 12 00 12 b0 10 00 48 00 00 00
+ * b0: 09 00 06 20 00 00 00 00 00 00 00 00 00 00 00 00
+ * c0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * f0: 00 00 00 00 00 00 00 00 86 0f 02 00 00 00 00 00
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/pci.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "hw/isa/isa.h"
+#include "sysemu/dma.h"
+#include "hw/ide/pci.h"
+#include "ahci_internal.h"
+
+#define ICH9_MSI_CAP_OFFSET 0x80
+#define ICH9_SATA_CAP_OFFSET 0xA8
+
+#define ICH9_IDP_BAR 4
+#define ICH9_MEM_BAR 5
+
+#define ICH9_IDP_INDEX 0x10
+#define ICH9_IDP_INDEX_LOG2 0x04
+
+static const VMStateDescription vmstate_ich9_ahci = {
+ .name = "ich9_ahci",
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, AHCIPCIState),
+ VMSTATE_AHCI(ahci, AHCIPCIState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void pci_ich9_reset(DeviceState *dev)
+{
+ AHCIPCIState *d = ICH9_AHCI(dev);
+
+ ahci_reset(&d->ahci);
+}
+
+static void pci_ich9_ahci_init(Object *obj)
+{
+ struct AHCIPCIState *d = ICH9_AHCI(obj);
+
+ ahci_init(&d->ahci, DEVICE(obj));
+}
+
+static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
+{
+ struct AHCIPCIState *d;
+ int sata_cap_offset;
+ uint8_t *sata_cap;
+ d = ICH9_AHCI(dev);
+ int ret;
+
+ ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6);
+
+ pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1);
+
+ dev->config[PCI_CACHE_LINE_SIZE] = 0x08; /* Cache line size */
+ dev->config[PCI_LATENCY_TIMER] = 0x00; /* Latency timer */
+ pci_config_set_interrupt_pin(dev->config, 1);
+
+ /* XXX Software should program this register */
+ dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */
+
+ d->ahci.irq = pci_allocate_irq(dev);
+
+ pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
+ &d->ahci.idp);
+ pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &d->ahci.mem);
+
+ sata_cap_offset = pci_add_capability(dev, PCI_CAP_ID_SATA,
+ ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE,
+ errp);
+ if (sata_cap_offset < 0) {
+ return;
+ }
+
+ sata_cap = dev->config + sata_cap_offset;
+ pci_set_word(sata_cap + SATA_CAP_REV, 0x10);
+ pci_set_long(sata_cap + SATA_CAP_BAR,
+ (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4));
+ d->ahci.idp_offset = ICH9_IDP_INDEX;
+
+ /* Although the AHCI 1.3 specification states that the first capability
+ * should be PMCAP, the Intel ICH9 data sheet specifies that the ICH9
+ * AHCI device puts the MSI capability first, pointing to 0x80. */
+ ret = msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false, NULL);
+ /* Any error other than -ENOTSUP (the board's MSI support is broken)
+ * is a programming error. Fall back to INTx silently on -ENOTSUP. */
+ assert(!ret || ret == -ENOTSUP);
+}
+
+static void pci_ich9_uninit(PCIDevice *dev)
+{
+ struct AHCIPCIState *d;
+ d = ICH9_AHCI(dev);
+
+ msi_uninit(dev);
+ ahci_uninit(&d->ahci);
+ qemu_free_irq(d->ahci.irq);
+}
+
+static void ich_ahci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = pci_ich9_ahci_realize;
+ k->exit = pci_ich9_uninit;
+ k->vendor_id = PCI_VENDOR_ID_INTEL;
+ k->device_id = PCI_DEVICE_ID_INTEL_82801IR;
+ k->revision = 0x02;
+ k->class_id = PCI_CLASS_STORAGE_SATA;
+ dc->vmsd = &vmstate_ich9_ahci;
+ dc->reset = pci_ich9_reset;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo ich_ahci_info = {
+ .name = TYPE_ICH9_AHCI,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(AHCIPCIState),
+ .instance_init = pci_ich9_ahci_init,
+ .class_init = ich_ahci_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void ich_ahci_register_types(void)
+{
+ type_register_static(&ich_ahci_info);
+}
+
+type_init(ich_ahci_register_types)
diff --git a/hw/ide/ioport.c b/hw/ide/ioport.c
new file mode 100644
index 000000000..e6caa537f
--- /dev/null
+++ b/hw/ide/ioport.c
@@ -0,0 +1,68 @@
+/*
+ * QEMU IDE disk and CD/DVD-ROM Emulator
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/isa/isa.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/dma.h"
+#include "hw/block/block.h"
+#include "sysemu/block-backend.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "sysemu/replay.h"
+
+#include "hw/ide/internal.h"
+#include "trace.h"
+
+static const MemoryRegionPortio ide_portio_list[] = {
+ { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
+ { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
+ { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
+ PORTIO_END_OF_LIST(),
+};
+
+static const MemoryRegionPortio ide_portio2_list[] = {
+ { 0, 1, 1, .read = ide_status_read, .write = ide_ctrl_write },
+ PORTIO_END_OF_LIST(),
+};
+
+int ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
+{
+ int ret;
+
+ /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
+ bridge has been set up properly to always register with ISA. */
+ ret = isa_register_portio_list(dev, &bus->portio_list,
+ iobase, ide_portio_list, bus, "ide");
+
+ if (ret == 0 && iobase2) {
+ ret = isa_register_portio_list(dev, &bus->portio2_list,
+ iobase2, ide_portio2_list, bus, "ide");
+ }
+
+ return ret;
+}
diff --git a/hw/ide/isa.c b/hw/ide/isa.c
new file mode 100644
index 000000000..24bbde24c
--- /dev/null
+++ b/hw/ide/isa.c
@@ -0,0 +1,138 @@
+/*
+ * QEMU IDE Emulation: ISA Bus support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/isa/isa.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/internal.h"
+#include "qom/object.h"
+
+/***********************************************************/
+/* ISA IDE definitions */
+
+#define TYPE_ISA_IDE "isa-ide"
+OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE)
+
+struct ISAIDEState {
+ ISADevice parent_obj;
+
+ IDEBus bus;
+ uint32_t iobase;
+ uint32_t iobase2;
+ uint32_t isairq;
+ qemu_irq irq;
+};
+
+static void isa_ide_reset(DeviceState *d)
+{
+ ISAIDEState *s = ISA_IDE(d);
+
+ ide_bus_reset(&s->bus);
+}
+
+static const VMStateDescription vmstate_ide_isa = {
+ .name = "isa-ide",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_IDE_BUS(bus, ISAIDEState),
+ VMSTATE_IDE_DRIVES(bus.ifs, ISAIDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void isa_ide_realizefn(DeviceState *dev, Error **errp)
+{
+ ISADevice *isadev = ISA_DEVICE(dev);
+ ISAIDEState *s = ISA_IDE(dev);
+
+ ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2);
+ ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2);
+ isa_init_irq(isadev, &s->irq, s->isairq);
+ ide_init2(&s->bus, s->irq);
+ vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_isa, s);
+ ide_register_restart_cb(&s->bus);
+}
+
+ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq,
+ DriveInfo *hd0, DriveInfo *hd1)
+{
+ DeviceState *dev;
+ ISADevice *isadev;
+ ISAIDEState *s;
+
+ isadev = isa_new(TYPE_ISA_IDE);
+ dev = DEVICE(isadev);
+ qdev_prop_set_uint32(dev, "iobase", iobase);
+ qdev_prop_set_uint32(dev, "iobase2", iobase2);
+ qdev_prop_set_uint32(dev, "irq", isairq);
+ isa_realize_and_unref(isadev, bus, &error_fatal);
+
+ s = ISA_IDE(dev);
+ if (hd0) {
+ ide_create_drive(&s->bus, 0, hd0);
+ }
+ if (hd1) {
+ ide_create_drive(&s->bus, 1, hd1);
+ }
+ return isadev;
+}
+
+static Property isa_ide_properties[] = {
+ DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0),
+ DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6),
+ DEFINE_PROP_UINT32("irq", ISAIDEState, isairq, 14),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void isa_ide_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = isa_ide_realizefn;
+ dc->fw_name = "ide";
+ dc->reset = isa_ide_reset;
+ device_class_set_props(dc, isa_ide_properties);
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo isa_ide_info = {
+ .name = TYPE_ISA_IDE,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(ISAIDEState),
+ .class_init = isa_ide_class_initfn,
+};
+
+static void isa_ide_register_types(void)
+{
+ type_register_static(&isa_ide_info);
+}
+
+type_init(isa_ide_register_types)
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
new file mode 100644
index 000000000..b03d401ce
--- /dev/null
+++ b/hw/ide/macio.c
@@ -0,0 +1,513 @@
+/*
+ * QEMU IDE Emulation: MacIO support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ppc/mac.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "hw/misc/macio/macio.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/internal.h"
+
+/* debug MACIO */
+// #define DEBUG_MACIO
+
+#ifdef DEBUG_MACIO
+static const int debug_macio = 1;
+#else
+static const int debug_macio = 0;
+#endif
+
+#define MACIO_DPRINTF(fmt, ...) do { \
+ if (debug_macio) { \
+ printf(fmt , ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+
+/***********************************************************/
+/* MacIO based PowerPC IDE */
+
+#define MACIO_PAGE_SIZE 4096
+
+static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
+{
+ DBDMA_io *io = opaque;
+ MACIOIDEState *m = io->opaque;
+ IDEState *s = idebus_active_if(&m->bus);
+ int64_t offset;
+
+ MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");
+
+ if (ret < 0) {
+ MACIO_DPRINTF("DMA error: %d\n", ret);
+ qemu_sglist_destroy(&s->sg);
+ ide_atapi_io_error(s, ret);
+ goto done;
+ }
+
+ if (!m->dma_active) {
+ MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
+ s->nsector, io->len, s->status);
+ /* data not ready yet, wait for the channel to get restarted */
+ io->processing = false;
+ return;
+ }
+
+ if (s->io_buffer_size <= 0) {
+ MACIO_DPRINTF("End of IDE transfer\n");
+ qemu_sglist_destroy(&s->sg);
+ ide_atapi_cmd_ok(s);
+ m->dma_active = false;
+ goto done;
+ }
+
+ if (io->len == 0) {
+ MACIO_DPRINTF("End of DMA transfer\n");
+ goto done;
+ }
+
+ if (s->lba == -1) {
+ /* Non-block ATAPI transfer - just copy to RAM */
+ s->io_buffer_size = MIN(s->io_buffer_size, io->len);
+ dma_memory_write(&address_space_memory, io->addr, s->io_buffer,
+ s->io_buffer_size);
+ io->len = 0;
+ ide_atapi_cmd_ok(s);
+ m->dma_active = false;
+ goto done;
+ }
+
+ /* Calculate current offset */
+ offset = ((int64_t)s->lba << 11) + s->io_buffer_index;
+
+ qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+ &address_space_memory);
+ qemu_sglist_add(&s->sg, io->addr, io->len);
+ s->io_buffer_size -= io->len;
+ s->io_buffer_index += io->len;
+ io->len = 0;
+
+ s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+ pmac_ide_atapi_transfer_cb, io);
+ return;
+
+done:
+ dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
+ io->dir, io->dma_len);
+
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
+
+ ide_set_inactive(s, false);
+ io->dma_end(opaque);
+}
+
+static void pmac_ide_transfer_cb(void *opaque, int ret)
+{
+ DBDMA_io *io = opaque;
+ MACIOIDEState *m = io->opaque;
+ IDEState *s = idebus_active_if(&m->bus);
+ int64_t offset;
+
+ MACIO_DPRINTF("pmac_ide_transfer_cb\n");
+
+ if (ret < 0) {
+ MACIO_DPRINTF("DMA error: %d\n", ret);
+ qemu_sglist_destroy(&s->sg);
+ ide_dma_error(s);
+ goto done;
+ }
+
+ if (!m->dma_active) {
+ MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
+ s->nsector, io->len, s->status);
+ /* data not ready yet, wait for the channel to get restarted */
+ io->processing = false;
+ return;
+ }
+
+ if (s->io_buffer_size <= 0) {
+ MACIO_DPRINTF("End of IDE transfer\n");
+ qemu_sglist_destroy(&s->sg);
+ s->status = READY_STAT | SEEK_STAT;
+ ide_set_irq(s->bus);
+ m->dma_active = false;
+ goto done;
+ }
+
+ if (io->len == 0) {
+ MACIO_DPRINTF("End of DMA transfer\n");
+ goto done;
+ }
+
+ /* Calculate current offset */
+ offset = (ide_get_sector(s) << 9) + s->io_buffer_index;
+
+ qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+ &address_space_memory);
+ qemu_sglist_add(&s->sg, io->addr, io->len);
+ s->io_buffer_size -= io->len;
+ s->io_buffer_index += io->len;
+ io->len = 0;
+
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
+ s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+ pmac_ide_atapi_transfer_cb, io);
+ break;
+ case IDE_DMA_WRITE:
+ s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset, 0x1,
+ pmac_ide_transfer_cb, io);
+ break;
+ case IDE_DMA_TRIM:
+ s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
+ offset, 0x1, ide_issue_trim, s,
+ pmac_ide_transfer_cb, io,
+ DMA_DIRECTION_TO_DEVICE);
+ break;
+ default:
+ abort();
+ }
+
+ return;
+
+done:
+ dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
+ io->dir, io->dma_len);
+
+ if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->blk), &s->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->blk), &s->acct);
+ }
+ }
+
+ ide_set_inactive(s, false);
+ io->dma_end(opaque);
+}
+
+static void pmac_ide_transfer(DBDMA_io *io)
+{
+ MACIOIDEState *m = io->opaque;
+ IDEState *s = idebus_active_if(&m->bus);
+
+ MACIO_DPRINTF("\n");
+
+ if (s->drive_kind == IDE_CD) {
+ block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
+ BLOCK_ACCT_READ);
+
+ pmac_ide_atapi_transfer_cb(io, 0);
+ return;
+ }
+
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
+ block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
+ BLOCK_ACCT_READ);
+ break;
+ case IDE_DMA_WRITE:
+ block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
+ BLOCK_ACCT_WRITE);
+ break;
+ default:
+ break;
+ }
+
+ pmac_ide_transfer_cb(io, 0);
+}
+
+static void pmac_ide_flush(DBDMA_io *io)
+{
+ MACIOIDEState *m = io->opaque;
+ IDEState *s = idebus_active_if(&m->bus);
+
+ if (s->bus->dma->aiocb) {
+ blk_drain(s->blk);
+ }
+}
+
+/* PowerMac IDE memory IO */
+static uint64_t pmac_ide_read(void *opaque, hwaddr addr, unsigned size)
+{
+ MACIOIDEState *d = opaque;
+ uint64_t retval = 0xffffffff;
+ int reg = addr >> 4;
+
+ switch (reg) {
+ case 0x0:
+ if (size == 2) {
+ retval = ide_data_readw(&d->bus, 0);
+ } else if (size == 4) {
+ retval = ide_data_readl(&d->bus, 0);
+ }
+ break;
+ case 0x1 ... 0x7:
+ if (size == 1) {
+ retval = ide_ioport_read(&d->bus, reg);
+ }
+ break;
+ case 0x8:
+ case 0x16:
+ if (size == 1) {
+ retval = ide_status_read(&d->bus, 0);
+ }
+ break;
+ case 0x20:
+ if (size == 4) {
+ retval = d->timing_reg;
+ }
+ break;
+ case 0x30:
+ /* This is an interrupt state register that only exists
+ * in the KeyLargo and later variants. Bit 0x8000_0000
+ * latches the DMA interrupt and has to be written to
+ * clear. Bit 0x4000_0000 is an image of the disk
+ * interrupt. MacOS X relies on this and will hang if
+ * we don't provide at least the disk interrupt
+ */
+ if (size == 4) {
+ retval = d->irq_reg;
+ }
+ break;
+ }
+
+ return retval;
+}
+
+
+static void pmac_ide_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ MACIOIDEState *d = opaque;
+ int reg = addr >> 4;
+
+ switch (reg) {
+ case 0x0:
+ if (size == 2) {
+ ide_data_writew(&d->bus, 0, val);
+ } else if (size == 4) {
+ ide_data_writel(&d->bus, 0, val);
+ }
+ break;
+ case 0x1 ... 0x7:
+ if (size == 1) {
+ ide_ioport_write(&d->bus, reg, val);
+ }
+ break;
+ case 0x8:
+ case 0x16:
+ if (size == 1) {
+ ide_ctrl_write(&d->bus, 0, val);
+ }
+ break;
+ case 0x20:
+ if (size == 4) {
+ d->timing_reg = val;
+ }
+ break;
+ case 0x30:
+ if (size == 4) {
+ if (val & 0x80000000u) {
+ d->irq_reg &= 0x7fffffff;
+ }
+ }
+ break;
+ }
+}
+
+static const MemoryRegionOps pmac_ide_ops = {
+ .read = pmac_ide_read,
+ .write = pmac_ide_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_pmac = {
+ .name = "ide",
+ .version_id = 5,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_IDE_BUS(bus, MACIOIDEState),
+ VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
+ VMSTATE_BOOL(dma_active, MACIOIDEState),
+ VMSTATE_UINT32(timing_reg, MACIOIDEState),
+ VMSTATE_UINT32(irq_reg, MACIOIDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void macio_ide_reset(DeviceState *dev)
+{
+ MACIOIDEState *d = MACIO_IDE(dev);
+
+ ide_bus_reset(&d->bus);
+}
+
+static int ide_nop_int(const IDEDMA *dma, bool is_write)
+{
+ return 0;
+}
+
+static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
+{
+ return 0;
+}
+
+static void ide_dbdma_start(const IDEDMA *dma, IDEState *s,
+ BlockCompletionFunc *cb)
+{
+ MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);
+
+ s->io_buffer_index = 0;
+ if (s->drive_kind == IDE_CD) {
+ s->io_buffer_size = s->packet_transfer_size;
+ } else {
+ s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE;
+ }
+
+ MACIO_DPRINTF("\n\n------------ IDE transfer\n");
+ MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n",
+ s->io_buffer_size, s->io_buffer_index);
+ MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size);
+ MACIO_DPRINTF("-------------------------\n");
+
+ m->dma_active = true;
+ DBDMA_kick(m->dbdma);
+}
+
+static const IDEDMAOps dbdma_ops = {
+ .start_dma = ide_dbdma_start,
+ .prepare_buf = ide_nop_int32,
+ .rw_buf = ide_nop_int,
+};
+
+static void macio_ide_realizefn(DeviceState *dev, Error **errp)
+{
+ MACIOIDEState *s = MACIO_IDE(dev);
+
+ ide_init2(&s->bus, s->ide_irq);
+
+ /* Register DMA callbacks */
+ s->dma.ops = &dbdma_ops;
+ s->bus.dma = &s->dma;
+}
+
+static void pmac_ide_irq(void *opaque, int n, int level)
+{
+ MACIOIDEState *s = opaque;
+ uint32_t mask = 0x80000000u >> n;
+
+ /* We need to reflect the IRQ state in the irq register */
+ if (level) {
+ s->irq_reg |= mask;
+ } else {
+ s->irq_reg &= ~mask;
+ }
+
+ if (n) {
+ qemu_set_irq(s->real_ide_irq, level);
+ } else {
+ qemu_set_irq(s->real_dma_irq, level);
+ }
+}
+
+static void macio_ide_initfn(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ MACIOIDEState *s = MACIO_IDE(obj);
+
+ ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
+ memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
+ sysbus_init_mmio(d, &s->mem);
+ sysbus_init_irq(d, &s->real_ide_irq);
+ sysbus_init_irq(d, &s->real_dma_irq);
+ s->dma_irq = qemu_allocate_irq(pmac_ide_irq, s, 0);
+ s->ide_irq = qemu_allocate_irq(pmac_ide_irq, s, 1);
+
+ object_property_add_link(obj, "dbdma", TYPE_MAC_DBDMA,
+ (Object **) &s->dbdma,
+ qdev_prop_allow_set_link_before_realize, 0);
+}
+
+static Property macio_ide_properties[] = {
+ DEFINE_PROP_UINT32("channel", MACIOIDEState, channel, 0),
+ DEFINE_PROP_UINT32("addr", MACIOIDEState, addr, -1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void macio_ide_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = macio_ide_realizefn;
+ dc->reset = macio_ide_reset;
+ device_class_set_props(dc, macio_ide_properties);
+ dc->vmsd = &vmstate_pmac;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo macio_ide_type_info = {
+ .name = TYPE_MACIO_IDE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MACIOIDEState),
+ .instance_init = macio_ide_initfn,
+ .class_init = macio_ide_class_init,
+};
+
+static void macio_ide_register_types(void)
+{
+ type_register_static(&macio_ide_type_info);
+}
+
+/* hd_table must contain 2 block drivers */
+void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (hd_table[i]) {
+ ide_create_drive(&s->bus, i, hd_table[i]);
+ }
+ }
+}
+
+void macio_ide_register_dma(MACIOIDEState *s)
+{
+ DBDMA_register_channel(s->dbdma, s->channel, s->dma_irq,
+ pmac_ide_transfer, pmac_ide_flush, s);
+}
+
+type_init(macio_ide_register_types)
diff --git a/hw/ide/meson.build b/hw/ide/meson.build
new file mode 100644
index 000000000..ddcb3b28d
--- /dev/null
+++ b/hw/ide/meson.build
@@ -0,0 +1,14 @@
+softmmu_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c'))
+softmmu_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c'))
+softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_CMD646', if_true: files('cmd646.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_CORE', if_true: files('core.c', 'atapi.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_ISA', if_true: files('isa.c', 'ioport.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_MACIO', if_true: files('macio.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_MMIO', if_true: files('mmio.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_QDEV', if_true: files('qdev.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c'))
+softmmu_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c'))
+softmmu_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c'))
diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c
new file mode 100644
index 000000000..6df9b4cbb
--- /dev/null
+++ b/hw/ide/microdrive.c
@@ -0,0 +1,643 @@
+/*
+ * QEMU IDE Emulation: microdrive (CF / PCMCIA)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pcmcia.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/internal.h"
+#include "qom/object.h"
+
+#define TYPE_MICRODRIVE "microdrive"
+OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE)
+
+/***********************************************************/
+/* CF-ATA Microdrive */
+
+#define METADATA_SIZE 0x20
+
+/* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. */
+
+struct MicroDriveState {
+ /*< private >*/
+ PCMCIACardState parent_obj;
+ /*< public >*/
+
+ IDEBus bus;
+ uint32_t attr_base;
+ uint32_t io_base;
+
+ /* Card state */
+ uint8_t opt;
+ uint8_t stat;
+ uint8_t pins;
+
+ uint8_t ctrl;
+ uint16_t io;
+ uint8_t cycle;
+};
+
+/* Register bitfields */
+enum md_opt {
+ OPT_MODE_MMAP = 0,
+ OPT_MODE_IOMAP16 = 1,
+ OPT_MODE_IOMAP1 = 2,
+ OPT_MODE_IOMAP2 = 3,
+ OPT_MODE = 0x3f,
+ OPT_LEVIREQ = 0x40,
+ OPT_SRESET = 0x80,
+};
+enum md_cstat {
+ STAT_INT = 0x02,
+ STAT_PWRDWN = 0x04,
+ STAT_XE = 0x10,
+ STAT_IOIS8 = 0x20,
+ STAT_SIGCHG = 0x40,
+ STAT_CHANGED = 0x80,
+};
+enum md_pins {
+ PINS_MRDY = 0x02,
+ PINS_CRDY = 0x20,
+};
+enum md_ctrl {
+ CTRL_IEN = 0x02,
+ CTRL_SRST = 0x04,
+};
+
+static inline void md_interrupt_update(MicroDriveState *s)
+{
+ PCMCIACardState *card = PCMCIA_CARD(s);
+
+ if (card->slot == NULL) {
+ return;
+ }
+
+ qemu_set_irq(card->slot->irq,
+ !(s->stat & STAT_INT) && /* Inverted */
+ !(s->ctrl & (CTRL_IEN | CTRL_SRST)) &&
+ !(s->opt & OPT_SRESET));
+}
+
+static void md_set_irq(void *opaque, int irq, int level)
+{
+ MicroDriveState *s = opaque;
+
+ if (level) {
+ s->stat |= STAT_INT;
+ } else {
+ s->stat &= ~STAT_INT;
+ }
+
+ md_interrupt_update(s);
+}
+
+static void md_reset(DeviceState *dev)
+{
+ MicroDriveState *s = MICRODRIVE(dev);
+
+ s->opt = OPT_MODE_MMAP;
+ s->stat = 0;
+ s->pins = 0;
+ s->cycle = 0;
+ s->ctrl = 0;
+ ide_bus_reset(&s->bus);
+}
+
+static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at)
+{
+ MicroDriveState *s = MICRODRIVE(card);
+ PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card);
+
+ if (at < s->attr_base) {
+ if (at < pcc->cis_len) {
+ return pcc->cis[at];
+ } else {
+ return 0x00;
+ }
+ }
+
+ at -= s->attr_base;
+
+ switch (at) {
+ case 0x00: /* Configuration Option Register */
+ return s->opt;
+ case 0x02: /* Card Configuration Status Register */
+ if (s->ctrl & CTRL_IEN) {
+ return s->stat & ~STAT_INT;
+ } else {
+ return s->stat;
+ }
+ case 0x04: /* Pin Replacement Register */
+ return (s->pins & PINS_CRDY) | 0x0c;
+ case 0x06: /* Socket and Copy Register */
+ return 0x00;
+#ifdef VERBOSE
+ default:
+ printf("%s: Bad attribute space register %02x\n", __func__, at);
+#endif
+ }
+
+ return 0;
+}
+
+static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value)
+{
+ MicroDriveState *s = MICRODRIVE(card);
+
+ at -= s->attr_base;
+
+ switch (at) {
+ case 0x00: /* Configuration Option Register */
+ s->opt = value & 0xcf;
+ if (value & OPT_SRESET) {
+ device_legacy_reset(DEVICE(s));
+ }
+ md_interrupt_update(s);
+ break;
+ case 0x02: /* Card Configuration Status Register */
+ if ((s->stat ^ value) & STAT_PWRDWN) {
+ s->pins |= PINS_CRDY;
+ }
+ s->stat &= 0x82;
+ s->stat |= value & 0x74;
+ md_interrupt_update(s);
+ /* Word 170 in Identify Device must be equal to STAT_XE */
+ break;
+ case 0x04: /* Pin Replacement Register */
+ s->pins &= PINS_CRDY;
+ s->pins |= value & PINS_MRDY;
+ break;
+ case 0x06: /* Socket and Copy Register */
+ break;
+ default:
+ printf("%s: Bad attribute space register %02x\n", __func__, at);
+ }
+}
+
+static uint16_t md_common_read(PCMCIACardState *card, uint32_t at)
+{
+ MicroDriveState *s = MICRODRIVE(card);
+ IDEState *ifs;
+ uint16_t ret;
+ at -= s->io_base;
+
+ switch (s->opt & OPT_MODE) {
+ case OPT_MODE_MMAP:
+ if ((at & ~0x3ff) == 0x400) {
+ at = 0;
+ }
+ break;
+ case OPT_MODE_IOMAP16:
+ at &= 0xf;
+ break;
+ case OPT_MODE_IOMAP1:
+ if ((at & ~0xf) == 0x3f0) {
+ at -= 0x3e8;
+ } else if ((at & ~0xf) == 0x1f0) {
+ at -= 0x1f0;
+ }
+ break;
+ case OPT_MODE_IOMAP2:
+ if ((at & ~0xf) == 0x370) {
+ at -= 0x368;
+ } else if ((at & ~0xf) == 0x170) {
+ at -= 0x170;
+ }
+ }
+
+ switch (at) {
+ case 0x0: /* Even RD Data */
+ case 0x8:
+ return ide_data_readw(&s->bus, 0);
+
+ /* TODO: 8-bit accesses */
+ if (s->cycle) {
+ ret = s->io >> 8;
+ } else {
+ s->io = ide_data_readw(&s->bus, 0);
+ ret = s->io & 0xff;
+ }
+ s->cycle = !s->cycle;
+ return ret;
+ case 0x9: /* Odd RD Data */
+ return s->io >> 8;
+ case 0xd: /* Error */
+ return ide_ioport_read(&s->bus, 0x1);
+ case 0xe: /* Alternate Status */
+ ifs = idebus_active_if(&s->bus);
+ if (ifs->blk) {
+ return ifs->status;
+ } else {
+ return 0;
+ }
+ case 0xf: /* Device Address */
+ ifs = idebus_active_if(&s->bus);
+ return 0xc2 | ((~ifs->select << 2) & 0x3c);
+ default:
+ return ide_ioport_read(&s->bus, at);
+ }
+
+ return 0;
+}
+
+static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value)
+{
+ MicroDriveState *s = MICRODRIVE(card);
+ at -= s->io_base;
+
+ switch (s->opt & OPT_MODE) {
+ case OPT_MODE_MMAP:
+ if ((at & ~0x3ff) == 0x400) {
+ at = 0;
+ }
+ break;
+ case OPT_MODE_IOMAP16:
+ at &= 0xf;
+ break;
+ case OPT_MODE_IOMAP1:
+ if ((at & ~0xf) == 0x3f0) {
+ at -= 0x3e8;
+ } else if ((at & ~0xf) == 0x1f0) {
+ at -= 0x1f0;
+ }
+ break;
+ case OPT_MODE_IOMAP2:
+ if ((at & ~0xf) == 0x370) {
+ at -= 0x368;
+ } else if ((at & ~0xf) == 0x170) {
+ at -= 0x170;
+ }
+ }
+
+ switch (at) {
+ case 0x0: /* Even WR Data */
+ case 0x8:
+ ide_data_writew(&s->bus, 0, value);
+ break;
+
+ /* TODO: 8-bit accesses */
+ if (s->cycle) {
+ ide_data_writew(&s->bus, 0, s->io | (value << 8));
+ } else {
+ s->io = value & 0xff;
+ }
+ s->cycle = !s->cycle;
+ break;
+ case 0x9:
+ s->io = value & 0xff;
+ s->cycle = !s->cycle;
+ break;
+ case 0xd: /* Features */
+ ide_ioport_write(&s->bus, 0x1, value);
+ break;
+ case 0xe: /* Device Control */
+ s->ctrl = value;
+ if (value & CTRL_SRST) {
+ device_legacy_reset(DEVICE(s));
+ }
+ md_interrupt_update(s);
+ break;
+ default:
+ if (s->stat & STAT_PWRDWN) {
+ s->pins |= PINS_CRDY;
+ s->stat &= ~STAT_PWRDWN;
+ }
+ ide_ioport_write(&s->bus, at, value);
+ }
+}
+
+static const VMStateDescription vmstate_microdrive = {
+ .name = "microdrive",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(opt, MicroDriveState),
+ VMSTATE_UINT8(stat, MicroDriveState),
+ VMSTATE_UINT8(pins, MicroDriveState),
+ VMSTATE_UINT8(ctrl, MicroDriveState),
+ VMSTATE_UINT16(io, MicroDriveState),
+ VMSTATE_UINT8(cycle, MicroDriveState),
+ VMSTATE_IDE_BUS(bus, MicroDriveState),
+ VMSTATE_IDE_DRIVES(bus.ifs, MicroDriveState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const uint8_t dscm1xxxx_cis[0x14a] = {
+ [0x000] = CISTPL_DEVICE, /* 5V Device Information */
+    [0x002] = 0x03, /* Tuple length = 3 bytes */
+ [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */
+ [0x006] = 0x01, /* Size = 2K bytes */
+ [0x008] = CISTPL_ENDMARK,
+
+ [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */
+    [0x00c] = 0x04, /* Tuple length = 4 bytes */
+ [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */
+ [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */
+ [0x012] = 0x01, /* Size = 2K bytes */
+ [0x014] = CISTPL_ENDMARK,
+
+ [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */
+ [0x018] = 0x02, /* Tuple length = 2 bytes */
+ [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */
+ [0x01c] = 0x01,
+
+    [0x01e] = CISTPL_MANFID, /* Manufacturer ID */
+ [0x020] = 0x04, /* Tuple length = 4 bytes */
+ [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */
+ [0x024] = 0x00,
+    [0x026] = 0x00, /* TPLMID_CARD = 0000 */
+ [0x028] = 0x00,
+
+ [0x02a] = CISTPL_VERS_1, /* Level 1 Version */
+    [0x02c] = 0x12, /* Tuple length = 18 bytes */
+ [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */
+ [0x030] = 0x01, /* Minor Version = 1 */
+ [0x032] = 'I',
+ [0x034] = 'B',
+ [0x036] = 'M',
+ [0x038] = 0x00,
+ [0x03a] = 'm',
+ [0x03c] = 'i',
+ [0x03e] = 'c',
+ [0x040] = 'r',
+ [0x042] = 'o',
+ [0x044] = 'd',
+ [0x046] = 'r',
+ [0x048] = 'i',
+ [0x04a] = 'v',
+ [0x04c] = 'e',
+ [0x04e] = 0x00,
+ [0x050] = CISTPL_ENDMARK,
+
+ [0x052] = CISTPL_FUNCID, /* Function ID */
+ [0x054] = 0x02, /* Tuple length = 2 bytes */
+ [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */
+ [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */
+
+ [0x05a] = CISTPL_FUNCE, /* Function Extension */
+ [0x05c] = 0x02, /* Tuple length = 2 bytes */
+ [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */
+ [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */
+
+ [0x062] = CISTPL_FUNCE, /* Function Extension */
+ [0x064] = 0x03, /* Tuple length = 3 bytes */
+ [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */
+ [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */
+ [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */
+
+ [0x06c] = CISTPL_CONFIG, /* Configuration */
+ [0x06e] = 0x05, /* Tuple length = 5 bytes */
+ [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */
+ [0x072] = 0x07, /* TPCC_LAST = 7 */
+ [0x074] = 0x00, /* TPCC_RADR = 0200 */
+ [0x076] = 0x02,
+ [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */
+
+ [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x07c] = 0x0b, /* Tuple length = 11 bytes */
+ [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */
+ [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */
+ [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */
+ [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
+ [0x086] = 0x55, /* NomV: 5.0 V */
+ [0x088] = 0x4d, /* MinV: 4.5 V */
+ [0x08a] = 0x5d, /* MaxV: 5.5 V */
+ [0x08c] = 0x4e, /* Peakl: 450 mA */
+ [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */
+ [0x090] = 0x00, /* Window descriptor: Window length = 0 */
+ [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */
+
+ [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x096] = 0x06, /* Tuple length = 6 bytes */
+ [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */
+ [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
+ [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
+ [0x09e] = 0xb5, /* NomV: 3.3 V */
+ [0x0a0] = 0x1e,
+ [0x0a2] = 0x3e, /* Peakl: 350 mA */
+
+ [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x0a6] = 0x0d, /* Tuple length = 13 bytes */
+ [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */
+ [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */
+ [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */
+ [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
+ [0x0b0] = 0x55, /* NomV: 5.0 V */
+ [0x0b2] = 0x4d, /* MinV: 4.5 V */
+ [0x0b4] = 0x5d, /* MaxV: 5.5 V */
+ [0x0b6] = 0x4e, /* Peakl: 450 mA */
+ [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */
+ [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */
+ [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */
+ [0x0be] = 0xff, /* IRQ8..IRQ15 supported */
+ [0x0c0] = 0x20, /* TPCE_MI = support power down mode */
+
+ [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x0c4] = 0x06, /* Tuple length = 6 bytes */
+ [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */
+ [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
+ [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
+ [0x0cc] = 0xb5, /* NomV: 3.3 V */
+ [0x0ce] = 0x1e,
+ [0x0d0] = 0x3e, /* Peakl: 350 mA */
+
+ [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x0d4] = 0x12, /* Tuple length = 18 bytes */
+ [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */
+ [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */
+ [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */
+ [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
+ [0x0de] = 0x55, /* NomV: 5.0 V */
+ [0x0e0] = 0x4d, /* MinV: 4.5 V */
+ [0x0e2] = 0x5d, /* MaxV: 5.5 V */
+ [0x0e4] = 0x4e, /* Peakl: 450 mA */
+ [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */
+ [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */
+ [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */
+ [0x0ec] = 0x01,
+ [0x0ee] = 0x07, /* Address block length = 8 */
+ [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */
+ [0x0f2] = 0x03,
+ [0x0f4] = 0x01, /* Address block length = 2 */
+ [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */
+ [0x0f8] = 0x20, /* TPCE_MI = support power down mode */
+
+ [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x0fc] = 0x06, /* Tuple length = 6 bytes */
+ [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */
+ [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
+ [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
+ [0x104] = 0xb5, /* NomV: 3.3 V */
+ [0x106] = 0x1e,
+ [0x108] = 0x3e, /* Peakl: 350 mA */
+
+ [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x10c] = 0x12, /* Tuple length = 18 bytes */
+ [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */
+ [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */
+ [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */
+ [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */
+ [0x116] = 0x55, /* NomV: 5.0 V */
+ [0x118] = 0x4d, /* MinV: 4.5 V */
+ [0x11a] = 0x5d, /* MaxV: 5.5 V */
+ [0x11c] = 0x4e, /* Peakl: 450 mA */
+ [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */
+ [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */
+ [0x122] = 0x70, /* Field 1 address = 0x0170 */
+ [0x124] = 0x01,
+ [0x126] = 0x07, /* Address block length = 8 */
+ [0x128] = 0x76, /* Field 2 address = 0x0376 */
+ [0x12a] = 0x03,
+ [0x12c] = 0x01, /* Address block length = 2 */
+ [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */
+ [0x130] = 0x20, /* TPCE_MI = support power down mode */
+
+ [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */
+ [0x134] = 0x06, /* Tuple length = 6 bytes */
+ [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */
+ [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */
+ [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */
+ [0x13c] = 0xb5, /* NomV: 3.3 V */
+ [0x13e] = 0x1e,
+ [0x140] = 0x3e, /* Peakl: 350 mA */
+
+ [0x142] = CISTPL_NO_LINK, /* No Link */
+ [0x144] = 0x00, /* Tuple length = 0 bytes */
+
+ [0x146] = CISTPL_END, /* Tuple End */
+};
+
+#define TYPE_DSCM1XXXX "dscm1xxxx"
+
+static int dscm1xxxx_attach(PCMCIACardState *card)
+{
+ MicroDriveState *md = MICRODRIVE(card);
+ PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card);
+
+ md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8);
+ md->io_base = 0x0;
+
+ device_legacy_reset(DEVICE(md));
+ md_interrupt_update(md);
+
+ return 0;
+}
+
+static int dscm1xxxx_detach(PCMCIACardState *card)
+{
+ MicroDriveState *md = MICRODRIVE(card);
+
+ device_legacy_reset(DEVICE(md));
+ return 0;
+}
+
+PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo)
+{
+ MicroDriveState *md;
+
+ md = MICRODRIVE(object_new(TYPE_DSCM1XXXX));
+ qdev_realize(DEVICE(md), NULL, &error_fatal);
+
+ if (dinfo != NULL) {
+ ide_create_drive(&md->bus, 0, dinfo);
+ }
+ md->bus.ifs[0].drive_kind = IDE_CFATA;
+ md->bus.ifs[0].mdata_size = METADATA_SIZE;
+ md->bus.ifs[0].mdata_storage = g_malloc0(METADATA_SIZE);
+
+ return PCMCIA_CARD(md);
+}
+
+static void dscm1xxxx_class_init(ObjectClass *oc, void *data)
+{
+ PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ pcc->cis = dscm1xxxx_cis;
+ pcc->cis_len = sizeof(dscm1xxxx_cis);
+
+ pcc->attach = dscm1xxxx_attach;
+ pcc->detach = dscm1xxxx_detach;
+ /* Reason: Needs to be wired-up in code, see dscm1xxxx_init() */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo dscm1xxxx_type_info = {
+ .name = TYPE_DSCM1XXXX,
+ .parent = TYPE_MICRODRIVE,
+ .class_init = dscm1xxxx_class_init,
+};
+
+static void microdrive_realize(DeviceState *dev, Error **errp)
+{
+ MicroDriveState *md = MICRODRIVE(dev);
+
+ ide_init2(&md->bus, qemu_allocate_irq(md_set_irq, md, 0));
+}
+
+static void microdrive_init(Object *obj)
+{
+ MicroDriveState *md = MICRODRIVE(obj);
+
+ ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1);
+}
+
+static void microdrive_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc);
+
+ pcc->attr_read = md_attr_read;
+ pcc->attr_write = md_attr_write;
+ pcc->common_read = md_common_read;
+ pcc->common_write = md_common_write;
+ pcc->io_read = md_common_read;
+ pcc->io_write = md_common_write;
+
+ dc->realize = microdrive_realize;
+ dc->reset = md_reset;
+ dc->vmsd = &vmstate_microdrive;
+}
+
+static const TypeInfo microdrive_type_info = {
+ .name = TYPE_MICRODRIVE,
+ .parent = TYPE_PCMCIA_CARD,
+ .instance_size = sizeof(MicroDriveState),
+ .instance_init = microdrive_init,
+ .abstract = true,
+ .class_init = microdrive_class_init,
+};
+
+static void microdrive_register_types(void)
+{
+ type_register_static(&microdrive_type_info);
+ type_register_static(&dscm1xxxx_type_info);
+}
+
+type_init(microdrive_register_types)
diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c
new file mode 100644
index 000000000..fb2ebd484
--- /dev/null
+++ b/hw/ide/mmio.c
@@ -0,0 +1,189 @@
+/*
+ * QEMU IDE Emulation: mmio support (for embedded).
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/internal.h"
+#include "hw/qdev-properties.h"
+#include "qom/object.h"
+
+/***********************************************************/
+/* MMIO based IDE port
+ * This emulates an IDE device connected directly to the CPU bus without
+ * a dedicated IDE controller, which is often seen on embedded boards.
+ */
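+/*
+ * Illustrative wiring sketch only (base address, IRQ and drives are
+ * hypothetical, not taken from any particular board):
+ *
+ *   DeviceState *dev = qdev_new(TYPE_MMIO_IDE);
+ *   qdev_prop_set_uint32(dev, "shift", 2);
+ *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, ide_irq);
+ *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+ *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, base + 0x100);
+ *   mmio_ide_init_drives(dev, hd0, hd1);
+ */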
+
+#define TYPE_MMIO_IDE "mmio-ide"
+typedef struct MMIOIDEState MMIOState;
+DECLARE_INSTANCE_CHECKER(MMIOState, MMIO_IDE,
+ TYPE_MMIO_IDE)
+
+struct MMIOIDEState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ IDEBus bus;
+
+ uint32_t shift;
+ qemu_irq irq;
+ MemoryRegion iomem1, iomem2;
+};
+
+static void mmio_ide_reset(DeviceState *dev)
+{
+ MMIOState *s = MMIO_IDE(dev);
+
+ ide_bus_reset(&s->bus);
+}
+
+static uint64_t mmio_ide_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MMIOState *s = opaque;
+ addr >>= s->shift;
+    if (addr & 7) {
+        return ide_ioport_read(&s->bus, addr);
+    } else {
+        return ide_data_readw(&s->bus, 0);
+    }
+}
+
+static void mmio_ide_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MMIOState *s = opaque;
+ addr >>= s->shift;
+    if (addr & 7) {
+        ide_ioport_write(&s->bus, addr, val);
+    } else {
+        ide_data_writew(&s->bus, 0, val);
+    }
+}
+
+static const MemoryRegionOps mmio_ide_ops = {
+ .read = mmio_ide_read,
+ .write = mmio_ide_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t mmio_ide_status_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+    MMIOState *s = opaque;
+ return ide_status_read(&s->bus, 0);
+}
+
+static void mmio_ide_ctrl_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MMIOState *s = opaque;
+ ide_ctrl_write(&s->bus, 0, val);
+}
+
+static const MemoryRegionOps mmio_ide_cs_ops = {
+ .read = mmio_ide_status_read,
+ .write = mmio_ide_ctrl_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_ide_mmio = {
+ .name = "mmio-ide",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_IDE_BUS(bus, MMIOState),
+ VMSTATE_IDE_DRIVES(bus.ifs, MMIOState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void mmio_ide_realizefn(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(dev);
+ MMIOState *s = MMIO_IDE(dev);
+
+ ide_init2(&s->bus, s->irq);
+
+ memory_region_init_io(&s->iomem1, OBJECT(s), &mmio_ide_ops, s,
+ "ide-mmio.1", 16 << s->shift);
+ memory_region_init_io(&s->iomem2, OBJECT(s), &mmio_ide_cs_ops, s,
+ "ide-mmio.2", 2 << s->shift);
+ sysbus_init_mmio(d, &s->iomem1);
+ sysbus_init_mmio(d, &s->iomem2);
+}
+
+static void mmio_ide_initfn(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ MMIOState *s = MMIO_IDE(obj);
+
+ ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
+ sysbus_init_irq(d, &s->irq);
+}
+
+static Property mmio_ide_properties[] = {
+ DEFINE_PROP_UINT32("shift", MMIOState, shift, 0),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void mmio_ide_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = mmio_ide_realizefn;
+ dc->reset = mmio_ide_reset;
+ device_class_set_props(dc, mmio_ide_properties);
+ dc->vmsd = &vmstate_ide_mmio;
+}
+
+static const TypeInfo mmio_ide_type_info = {
+ .name = TYPE_MMIO_IDE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MMIOState),
+ .instance_init = mmio_ide_initfn,
+ .class_init = mmio_ide_class_init,
+};
+
+static void mmio_ide_register_types(void)
+{
+ type_register_static(&mmio_ide_type_info);
+}
+
+void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1)
+{
+ MMIOState *s = MMIO_IDE(dev);
+
+ if (hd0 != NULL) {
+ ide_create_drive(&s->bus, 0, hd0);
+ }
+ if (hd1 != NULL) {
+ ide_create_drive(&s->bus, 1, hd1);
+ }
+}
+
+type_init(mmio_ide_register_types)
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
new file mode 100644
index 000000000..84ba73354
--- /dev/null
+++ b/hw/ide/pci.c
@@ -0,0 +1,534 @@
+/*
+ * QEMU IDE Emulation: PCI Bus support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "hw/ide/pci.h"
+#include "trace.h"
+
+#define BMDMA_PAGE_SIZE 4096
+
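+/*
+ * Status register bits that older QEMU versions reused to carry the
+ * bus error_status retry flags; they are only relevant for migration
+ * compatibility (see ide_bmdma_pre_save/ide_bmdma_post_load below).
+ */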
+#define BM_MIGRATION_COMPAT_STATUS_BITS \
+ (IDE_RETRY_DMA | IDE_RETRY_PIO | \
+ IDE_RETRY_READ | IDE_RETRY_FLUSH)
+
+static uint64_t pci_ide_status_read(void *opaque, hwaddr addr, unsigned size)
+{
+ IDEBus *bus = opaque;
+
+ if (addr != 2 || size != 1) {
+ return ((uint64_t)1 << (size * 8)) - 1;
+ }
+ return ide_status_read(bus, addr + 2);
+}
+
+static void pci_ide_ctrl_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ IDEBus *bus = opaque;
+
+ if (addr != 2 || size != 1) {
+ return;
+ }
+ ide_ctrl_write(bus, addr + 2, data);
+}
+
+const MemoryRegionOps pci_ide_cmd_le_ops = {
+ .read = pci_ide_status_read,
+ .write = pci_ide_ctrl_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
+{
+ IDEBus *bus = opaque;
+
+ if (size == 1) {
+ return ide_ioport_read(bus, addr);
+ } else if (addr == 0) {
+ if (size == 2) {
+ return ide_data_readw(bus, addr);
+ } else {
+ return ide_data_readl(bus, addr);
+ }
+ }
+ return ((uint64_t)1 << (size * 8)) - 1;
+}
+
+static void pci_ide_data_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ IDEBus *bus = opaque;
+
+ if (size == 1) {
+ ide_ioport_write(bus, addr, data);
+ } else if (addr == 0) {
+ if (size == 2) {
+ ide_data_writew(bus, addr, data);
+ } else {
+ ide_data_writel(bus, addr, data);
+ }
+ }
+}
+
+const MemoryRegionOps pci_ide_data_le_ops = {
+ .read = pci_ide_data_read,
+ .write = pci_ide_data_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void bmdma_start_dma(const IDEDMA *dma, IDEState *s,
+ BlockCompletionFunc *dma_cb)
+{
+ BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
+
+ bm->dma_cb = dma_cb;
+ bm->cur_prd_last = 0;
+ bm->cur_prd_addr = 0;
+ bm->cur_prd_len = 0;
+
+ if (bm->status & BM_STATUS_DMAING) {
+ bm->dma_cb(bmdma_active_if(bm), 0);
+ }
+}
+
+/**
+ * Prepare an sglist based on available PRDs.
+ * @limit: How many bytes to prepare total.
+ *
+ * Returns the number of bytes prepared, -1 on error.
+ * IDEState.io_buffer_size will contain the number of bytes described
+ * by the PRDs, whether or not we added them to the sglist.
+ */
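+/*
+ * A PRD entry, as parsed below, is two little-endian 32-bit words: the
+ * physical base address and a size word whose low 16 bits give the byte
+ * count (rounded down to even, 0 meaning 64 KiB) and whose bit 31 marks
+ * the last entry.  For example (illustrative values only), the entry
+ * { addr = 0x1000, size = 0x80000200 } describes a final 512-byte
+ * region at guest physical address 0x1000.
+ */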
+static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit)
+{
+ BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
+ IDEState *s = bmdma_active_if(bm);
+ PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
+ struct {
+ uint32_t addr;
+ uint32_t size;
+ } prd;
+ int l, len;
+
+ pci_dma_sglist_init(&s->sg, pci_dev,
+ s->nsector / (BMDMA_PAGE_SIZE / BDRV_SECTOR_SIZE) + 1);
+ s->io_buffer_size = 0;
+ for(;;) {
+ if (bm->cur_prd_len == 0) {
+ /* end of table (with a fail safe of one page) */
+ if (bm->cur_prd_last ||
+ (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
+ return s->sg.size;
+ }
+ pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
+ bm->cur_addr += 8;
+ prd.addr = le32_to_cpu(prd.addr);
+ prd.size = le32_to_cpu(prd.size);
+ len = prd.size & 0xfffe;
+ if (len == 0)
+ len = 0x10000;
+ bm->cur_prd_len = len;
+ bm->cur_prd_addr = prd.addr;
+ bm->cur_prd_last = (prd.size & 0x80000000);
+ }
+ l = bm->cur_prd_len;
+ if (l > 0) {
+ uint64_t sg_len;
+
+ /* Don't add extra bytes to the SGList; consume any remaining
+ * PRDs from the guest, but ignore them. */
+ sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
+ if (sg_len) {
+ qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
+ }
+
+ bm->cur_prd_addr += l;
+ bm->cur_prd_len -= l;
+ s->io_buffer_size += l;
+ }
+ }
+
+ qemu_sglist_destroy(&s->sg);
+ s->io_buffer_size = 0;
+ return -1;
+}
+
+/* return 0 if buffer completed */
+static int bmdma_rw_buf(const IDEDMA *dma, bool is_write)
+{
+ BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
+ IDEState *s = bmdma_active_if(bm);
+ PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
+ struct {
+ uint32_t addr;
+ uint32_t size;
+ } prd;
+ int l, len;
+
+ for(;;) {
+ l = s->io_buffer_size - s->io_buffer_index;
+ if (l <= 0)
+ break;
+ if (bm->cur_prd_len == 0) {
+ /* end of table (with a fail safe of one page) */
+ if (bm->cur_prd_last ||
+ (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
+ return 0;
+ pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
+ bm->cur_addr += 8;
+ prd.addr = le32_to_cpu(prd.addr);
+ prd.size = le32_to_cpu(prd.size);
+ len = prd.size & 0xfffe;
+ if (len == 0)
+ len = 0x10000;
+ bm->cur_prd_len = len;
+ bm->cur_prd_addr = prd.addr;
+ bm->cur_prd_last = (prd.size & 0x80000000);
+ }
+ if (l > bm->cur_prd_len)
+ l = bm->cur_prd_len;
+ if (l > 0) {
+ if (is_write) {
+ pci_dma_write(pci_dev, bm->cur_prd_addr,
+ s->io_buffer + s->io_buffer_index, l);
+ } else {
+ pci_dma_read(pci_dev, bm->cur_prd_addr,
+ s->io_buffer + s->io_buffer_index, l);
+ }
+ bm->cur_prd_addr += l;
+ bm->cur_prd_len -= l;
+ s->io_buffer_index += l;
+ }
+ }
+ return 1;
+}
+
+static void bmdma_set_inactive(const IDEDMA *dma, bool more)
+{
+ BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
+
+ bm->dma_cb = NULL;
+ if (more) {
+ bm->status |= BM_STATUS_DMAING;
+ } else {
+ bm->status &= ~BM_STATUS_DMAING;
+ }
+}
+
+static void bmdma_restart_dma(const IDEDMA *dma)
+{
+ BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
+
+ bm->cur_addr = bm->addr;
+}
+
+static void bmdma_cancel(BMDMAState *bm)
+{
+ if (bm->status & BM_STATUS_DMAING) {
+ /* cancel DMA request */
+ bmdma_set_inactive(&bm->dma, false);
+ }
+}
+
+static void bmdma_reset(const IDEDMA *dma)
+{
+ BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
+
+ trace_bmdma_reset();
+ bmdma_cancel(bm);
+ bm->cmd = 0;
+ bm->status = 0;
+ bm->addr = 0;
+ bm->cur_addr = 0;
+ bm->cur_prd_last = 0;
+ bm->cur_prd_addr = 0;
+ bm->cur_prd_len = 0;
+}
+
+static void bmdma_irq(void *opaque, int n, int level)
+{
+ BMDMAState *bm = opaque;
+
+ if (!level) {
+ /* pass through lower */
+ qemu_set_irq(bm->irq, level);
+ return;
+ }
+
+ bm->status |= BM_STATUS_INT;
+
+ /* trigger the real irq */
+ qemu_set_irq(bm->irq, level);
+}
+
+void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
+{
+ trace_bmdma_cmd_writeb(val);
+
+    /* Ignore writes to SSBM (bit 0, Start/Stop Bus Master) if it
+     * keeps the old value */
+ if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
+ if (!(val & BM_CMD_START)) {
+ ide_cancel_dma_sync(idebus_active_if(bm->bus));
+ bm->status &= ~BM_STATUS_DMAING;
+ } else {
+ bm->cur_addr = bm->addr;
+ if (!(bm->status & BM_STATUS_DMAING)) {
+ bm->status |= BM_STATUS_DMAING;
+ /* start dma transfer if possible */
+ if (bm->dma_cb)
+ bm->dma_cb(bmdma_active_if(bm), 0);
+ }
+ }
+ }
+
+ bm->cmd = val & 0x09;
+}
+
+static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
+ unsigned width)
+{
+ BMDMAState *bm = opaque;
+ uint32_t mask = (1ULL << (width * 8)) - 1;
+ uint64_t data;
+
+ data = (bm->addr >> (addr * 8)) & mask;
+ trace_bmdma_addr_read(data);
+ return data;
+}
+
+static void bmdma_addr_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned width)
+{
+ BMDMAState *bm = opaque;
+ int shift = addr * 8;
+ uint32_t mask = (1ULL << (width * 8)) - 1;
+
+ trace_bmdma_addr_write(data);
+ bm->addr &= ~(mask << shift);
+ bm->addr |= ((data & mask) << shift) & ~3;
+}
+
+MemoryRegionOps bmdma_addr_ioport_ops = {
+ .read = bmdma_addr_read,
+ .write = bmdma_addr_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static bool ide_bmdma_current_needed(void *opaque)
+{
+ BMDMAState *bm = opaque;
+
+ return (bm->cur_prd_len != 0);
+}
+
+static bool ide_bmdma_status_needed(void *opaque)
+{
+ BMDMAState *bm = opaque;
+
+ /* Older versions abused some bits in the status register for internal
+ * error state. If any of these bits are set, we must add a subsection to
+ * transfer the real status register */
+ uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;
+
+ return ((bm->status & abused_bits) != 0);
+}
+
+static int ide_bmdma_pre_save(void *opaque)
+{
+ BMDMAState *bm = opaque;
+ uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;
+
+ if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
+ bm->bus->error_status =
+ ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
+ }
+ bm->migration_retry_unit = bm->bus->retry_unit;
+ bm->migration_retry_sector_num = bm->bus->retry_sector_num;
+ bm->migration_retry_nsector = bm->bus->retry_nsector;
+ bm->migration_compat_status =
+ (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
+
+ return 0;
+}
+
+/* This function accesses bm->bus->error_status which is loaded only after
+ * BMDMA itself. This is why the function is called from ide_pci_post_load
+ * instead of being registered with VMState where it would run too early. */
+static int ide_bmdma_post_load(void *opaque, int version_id)
+{
+ BMDMAState *bm = opaque;
+ uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;
+
+ if (bm->status == 0) {
+ bm->status = bm->migration_compat_status & ~abused_bits;
+ bm->bus->error_status |= bm->migration_compat_status & abused_bits;
+ }
+ if (bm->bus->error_status) {
+ bm->bus->retry_sector_num = bm->migration_retry_sector_num;
+ bm->bus->retry_nsector = bm->migration_retry_nsector;
+ bm->bus->retry_unit = bm->migration_retry_unit;
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_bmdma_current = {
+ .name = "ide bmdma_current",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ide_bmdma_current_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(cur_addr, BMDMAState),
+ VMSTATE_UINT32(cur_prd_last, BMDMAState),
+ VMSTATE_UINT32(cur_prd_addr, BMDMAState),
+ VMSTATE_UINT32(cur_prd_len, BMDMAState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_bmdma_status = {
+    .name = "ide bmdma/status",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ide_bmdma_status_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(status, BMDMAState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_bmdma = {
+ .name = "ide bmdma",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .pre_save = ide_bmdma_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(cmd, BMDMAState),
+ VMSTATE_UINT8(migration_compat_status, BMDMAState),
+ VMSTATE_UINT32(addr, BMDMAState),
+ VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
+ VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
+ VMSTATE_UINT8(migration_retry_unit, BMDMAState),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_bmdma_current,
+ &vmstate_bmdma_status,
+ NULL
+ }
+};
+
+static int ide_pci_post_load(void *opaque, int version_id)
+{
+ PCIIDEState *d = opaque;
+ int i;
+
+    for (i = 0; i < 2; i++) {
+        /* current versions always store 0/1, but older versions
+           stored bigger values; we only need the last bit */
+ d->bmdma[i].migration_retry_unit &= 1;
+ ide_bmdma_post_load(&d->bmdma[i], -1);
+ }
+
+ return 0;
+}
+
+const VMStateDescription vmstate_ide_pci = {
+ .name = "ide",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .post_load = ide_pci_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
+ VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
+ vmstate_bmdma, BMDMAState),
+ VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
+ VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
+ VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* Attach up to four drives from the global drive table (two per bus) */
+void pci_ide_create_devs(PCIDevice *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ DriveInfo *hd_table[2 * MAX_IDE_DEVS];
+ static const int bus[4] = { 0, 0, 1, 1 };
+ static const int unit[4] = { 0, 1, 0, 1 };
+ int i;
+
+ ide_drive_get(hd_table, ARRAY_SIZE(hd_table));
+ for (i = 0; i < 4; i++) {
+ if (hd_table[i]) {
+ ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
+ }
+ }
+}
+
+static const struct IDEDMAOps bmdma_ops = {
+ .start_dma = bmdma_start_dma,
+ .prepare_buf = bmdma_prepare_buf,
+ .rw_buf = bmdma_rw_buf,
+ .restart_dma = bmdma_restart_dma,
+ .set_inactive = bmdma_set_inactive,
+ .reset = bmdma_reset,
+};
+
+void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
+{
+ if (bus->dma == &bm->dma) {
+ return;
+ }
+
+ bm->dma.ops = &bmdma_ops;
+ bus->dma = &bm->dma;
+ bm->irq = bus->irq;
+ bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
+ bm->pci_dev = d;
+}
+
+static const TypeInfo pci_ide_type_info = {
+ .name = TYPE_PCI_IDE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIIDEState),
+ .abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void pci_ide_register_types(void)
+{
+ type_register_static(&pci_ide_type_info);
+}
+
+type_init(pci_ide_register_types)
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
new file mode 100644
index 000000000..ce89fd0aa
--- /dev/null
+++ b/hw/ide/piix.c
@@ -0,0 +1,279 @@
+/*
+ * QEMU IDE Emulation: PCI PIIX3/4 support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/pci.h"
+#include "trace.h"
+
+static uint64_t bmdma_read(void *opaque, hwaddr addr, unsigned size)
+{
+ BMDMAState *bm = opaque;
+ uint32_t val;
+
+ if (size != 1) {
+ return ((uint64_t)1 << (size * 8)) - 1;
+ }
+
+    switch (addr & 3) {
+ case 0:
+ val = bm->cmd;
+ break;
+ case 2:
+ val = bm->status;
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ trace_bmdma_read(addr, val);
+ return val;
+}
+
+static void bmdma_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ BMDMAState *bm = opaque;
+
+ if (size != 1) {
+ return;
+ }
+
+ trace_bmdma_write(addr, val);
+
+    switch (addr & 3) {
+ case 0:
+ bmdma_cmd_writeb(bm, val);
+ break;
+ case 2:
+ bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ break;
+ }
+}
+
+static const MemoryRegionOps piix_bmdma_ops = {
+ .read = bmdma_read,
+ .write = bmdma_write,
+};
+
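+/*
+ * BAR 4 is a 16-byte I/O region: each of the two channels gets 4 bytes
+ * of bus master command/status registers followed by 4 bytes of PRD
+ * table address, so the secondary channel starts at offset 8.
+ */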
+static void bmdma_setup_bar(PCIIDEState *d)
+{
+ int i;
+
+ memory_region_init(&d->bmdma_bar, OBJECT(d), "piix-bmdma-container", 16);
+    for (i = 0; i < 2; i++) {
+ BMDMAState *bm = &d->bmdma[i];
+
+ memory_region_init_io(&bm->extra_io, OBJECT(d), &piix_bmdma_ops, bm,
+ "piix-bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io);
+ memory_region_init_io(&bm->addr_ioport, OBJECT(d),
+ &bmdma_addr_ioport_ops, bm, "bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport);
+ }
+}
+
+static void piix_ide_reset(DeviceState *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ PCIDevice *pd = PCI_DEVICE(d);
+ uint8_t *pci_conf = pd->config;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_reset(&d->bus[i]);
+ }
+
+ /* TODO: this is the default. do not override. */
+ pci_conf[PCI_COMMAND] = 0x00;
+ /* TODO: this is the default. do not override. */
+ pci_conf[PCI_COMMAND + 1] = 0x00;
+ /* TODO: use pci_set_word */
+ pci_conf[PCI_STATUS] = PCI_STATUS_FAST_BACK;
+ pci_conf[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_MEDIUM >> 8;
+ pci_conf[0x20] = 0x01; /* BMIBA: 20-23h */
+}
+
+static int pci_piix_init_ports(PCIIDEState *d)
+{
+ static const struct {
+ int iobase;
+ int iobase2;
+ int isairq;
+ } port_info[] = {
+ {0x1f0, 0x3f6, 14},
+ {0x170, 0x376, 15},
+ };
+ int i, ret;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
+ ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase,
+ port_info[i].iobase2);
+ if (ret) {
+ return ret;
+ }
+ ide_init2(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq));
+
+ bmdma_init(&d->bus[i], &d->bmdma[i], d);
+ d->bmdma[i].bus = &d->bus[i];
+ ide_register_restart_cb(&d->bus[i]);
+ }
+
+ return 0;
+}
+
+static void pci_piix_ide_realize(PCIDevice *dev, Error **errp)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ uint8_t *pci_conf = dev->config;
+ int rc;
+
+    pci_conf[PCI_CLASS_PROG] = 0x80; /* legacy ATA mode */
+
+ bmdma_setup_bar(d);
+ pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);
+
+ vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_pci, d);
+
+ rc = pci_piix_init_ports(d);
+ if (rc) {
+ error_setg_errno(errp, -rc, "Failed to realize %s",
+ object_get_typename(OBJECT(dev)));
+ }
+}
+
+int pci_piix3_xen_ide_unplug(DeviceState *dev, bool aux)
+{
+ PCIIDEState *pci_ide;
+ int i;
+ IDEDevice *idedev;
+ IDEBus *idebus;
+ BlockBackend *blk;
+
+ pci_ide = PCI_IDE(dev);
+
+ for (i = aux ? 1 : 0; i < 4; i++) {
+ idebus = &pci_ide->bus[i / 2];
+ blk = idebus->ifs[i % 2].blk;
+
+ if (blk && idebus->ifs[i % 2].drive_kind != IDE_CD) {
+ if (!(i % 2)) {
+ idedev = idebus->master;
+ } else {
+ idedev = idebus->slave;
+ }
+
+ blk_drain(blk);
+ blk_flush(blk);
+
+ blk_detach_dev(blk, DEVICE(idedev));
+ idebus->ifs[i % 2].blk = NULL;
+ idedev->conf.blk = NULL;
+ monitor_remove_blk(blk);
+ blk_unref(blk);
+ }
+ }
+ qdev_reset_all(dev);
+ return 0;
+}
+
+static void pci_piix_ide_exitfn(PCIDevice *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ unsigned i;
+
+ for (i = 0; i < 2; ++i) {
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io);
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport);
+ }
+}
+
+/* NOTE: for the PIIX3, the IRQs and IOports are hardcoded */
+static void piix3_ide_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = piix_ide_reset;
+ k->realize = pci_piix_ide_realize;
+ k->exit = pci_piix_ide_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_INTEL;
+ k->device_id = PCI_DEVICE_ID_INTEL_82371SB_1;
+ k->class_id = PCI_CLASS_STORAGE_IDE;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo piix3_ide_info = {
+ .name = "piix3-ide",
+ .parent = TYPE_PCI_IDE,
+ .class_init = piix3_ide_class_init,
+};
+
+static const TypeInfo piix3_ide_xen_info = {
+ .name = "piix3-ide-xen",
+ .parent = TYPE_PCI_IDE,
+ .class_init = piix3_ide_class_init,
+};
+
+/* NOTE: for the PIIX4, the IRQs and IOports are hardcoded */
+static void piix4_ide_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = piix_ide_reset;
+ k->realize = pci_piix_ide_realize;
+ k->exit = pci_piix_ide_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_INTEL;
+ k->device_id = PCI_DEVICE_ID_INTEL_82371AB;
+ k->class_id = PCI_CLASS_STORAGE_IDE;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo piix4_ide_info = {
+ .name = "piix4-ide",
+ .parent = TYPE_PCI_IDE,
+ .class_init = piix4_ide_class_init,
+};
+
+static void piix_ide_register_types(void)
+{
+ type_register_static(&piix3_ide_info);
+ type_register_static(&piix3_ide_xen_info);
+ type_register_static(&piix4_ide_info);
+}
+
+type_init(piix_ide_register_types)
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
new file mode 100644
index 000000000..618045b85
--- /dev/null
+++ b/hw/ide/qdev.c
@@ -0,0 +1,371 @@
+/*
+ * ide bus support for qdev.
+ *
+ * Copyright (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/dma.h"
+#include "qapi/error.h"
+#include "qapi/qapi-types-block.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "hw/ide/internal.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+#include "hw/block/block.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
+#include "qapi/visitor.h"
+
+/* --------------------------------- */
+
+static char *idebus_get_fw_dev_path(DeviceState *dev);
+static void idebus_unrealize(BusState *qdev);
+
+static Property ide_props[] = {
+ DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ide_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *k = BUS_CLASS(klass);
+
+ k->get_fw_dev_path = idebus_get_fw_dev_path;
+ k->unrealize = idebus_unrealize;
+}
+
+static void idebus_unrealize(BusState *bus)
+{
+ IDEBus *ibus = IDE_BUS(bus);
+
+ if (ibus->vmstate) {
+ qemu_del_vm_change_state_handler(ibus->vmstate);
+ }
+}
+
+static const TypeInfo ide_bus_info = {
+ .name = TYPE_IDE_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(IDEBus),
+ .class_init = ide_bus_class_init,
+};
+
+void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
+ int bus_id, int max_units)
+{
+ qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL);
+ idebus->bus_id = bus_id;
+ idebus->max_units = max_units;
+}
+
+static char *idebus_get_fw_dev_path(DeviceState *dev)
+{
+ char path[30];
+
+ snprintf(path, sizeof(path), "%s@%x", qdev_fw_name(dev),
+ ((IDEBus*)dev->parent_bus)->bus_id);
+
+ return g_strdup(path);
+}
+
+static void ide_qdev_realize(DeviceState *qdev, Error **errp)
+{
+ IDEDevice *dev = IDE_DEVICE(qdev);
+ IDEDeviceClass *dc = IDE_DEVICE_GET_CLASS(dev);
+ IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus);
+
+ if (dev->unit == -1) {
+ dev->unit = bus->master ? 1 : 0;
+ }
+
+ if (dev->unit >= bus->max_units) {
+ error_setg(errp, "Can't create IDE unit %d, bus supports only %d units",
+ dev->unit, bus->max_units);
+ return;
+ }
+
+ switch (dev->unit) {
+ case 0:
+ if (bus->master) {
+ error_setg(errp, "IDE unit %d is in use", dev->unit);
+ return;
+ }
+ bus->master = dev;
+ break;
+ case 1:
+ if (bus->slave) {
+ error_setg(errp, "IDE unit %d is in use", dev->unit);
+ return;
+ }
+ bus->slave = dev;
+ break;
+ default:
+ error_setg(errp, "Invalid IDE unit %d", dev->unit);
+ return;
+ }
+ dc->realize(dev, errp);
+}
+
+IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive)
+{
+ DeviceState *dev;
+
+ dev = qdev_new(drive->media_cd ? "ide-cd" : "ide-hd");
+ qdev_prop_set_uint32(dev, "unit", unit);
+ qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(drive),
+ &error_fatal);
+ qdev_realize_and_unref(dev, &bus->qbus, &error_fatal);
+ return DO_UPCAST(IDEDevice, qdev, dev);
+}
+
+int ide_get_geometry(BusState *bus, int unit,
+ int16_t *cyls, int8_t *heads, int8_t *secs)
+{
+ IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit];
+
+ if (s->drive_kind != IDE_HD || !s->blk) {
+ return -1;
+ }
+
+ *cyls = s->cylinders;
+ *heads = s->heads;
+ *secs = s->sectors;
+ return 0;
+}
+
+int ide_get_bios_chs_trans(BusState *bus, int unit)
+{
+ return DO_UPCAST(IDEBus, qbus, bus)->ifs[unit].chs_trans;
+}
+
+/* --------------------------------- */
+
+typedef struct IDEDrive {
+ IDEDevice dev;
+} IDEDrive;
+
+static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp)
+{
+ IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus);
+ IDEState *s = bus->ifs + dev->unit;
+ int ret;
+
+ if (!dev->conf.blk) {
+ if (kind != IDE_CD) {
+ error_setg(errp, "No drive specified");
+ return;
+ } else {
+ /* Anonymous BlockBackend for an empty drive */
+ dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
+ ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
+ assert(ret == 0);
+ }
+ }
+
+ if (dev->conf.discard_granularity == -1) {
+ dev->conf.discard_granularity = 512;
+ } else if (dev->conf.discard_granularity &&
+ dev->conf.discard_granularity != 512) {
+ error_setg(errp, "discard_granularity must be 512 for ide");
+ return;
+ }
+
+ if (!blkconf_blocksizes(&dev->conf, errp)) {
+ return;
+ }
+
+ if (dev->conf.logical_block_size != 512) {
+ error_setg(errp, "logical_block_size must be 512 for IDE");
+ return;
+ }
+
+ if (kind != IDE_CD) {
+ if (!blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255,
+ errp)) {
+ return;
+ }
+ }
+ if (!blkconf_apply_backend_options(&dev->conf, kind == IDE_CD,
+ kind != IDE_CD, errp)) {
+ return;
+ }
+
+ if (ide_init_drive(s, dev->conf.blk, kind,
+ dev->version, dev->serial, dev->model, dev->wwn,
+ dev->conf.cyls, dev->conf.heads, dev->conf.secs,
+ dev->chs_trans, errp) < 0) {
+ return;
+ }
+
+ if (!dev->version) {
+ dev->version = g_strdup(s->version);
+ }
+ if (!dev->serial) {
+ dev->serial = g_strdup(s->drive_serial_str);
+ }
+
+ add_boot_device_path(dev->conf.bootindex, &dev->qdev,
+ dev->unit ? "/disk@1" : "/disk@0");
+
+ add_boot_device_lchs(&dev->qdev, dev->unit ? "/disk@1" : "/disk@0",
+ dev->conf.lcyls,
+ dev->conf.lheads,
+ dev->conf.lsecs);
+}
+
+static void ide_dev_get_bootindex(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ IDEDevice *d = IDE_DEVICE(obj);
+
+ visit_type_int32(v, name, &d->conf.bootindex, errp);
+}
+
+static void ide_dev_set_bootindex(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ IDEDevice *d = IDE_DEVICE(obj);
+ int32_t boot_index;
+ Error *local_err = NULL;
+
+ if (!visit_type_int32(v, name, &boot_index, errp)) {
+ return;
+ }
+ /* check whether bootindex is present in fw_boot_order list */
+ check_boot_index(boot_index, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ /* change bootindex to a new one */
+ d->conf.bootindex = boot_index;
+
+ if (d->unit != -1) {
+ add_boot_device_path(d->conf.bootindex, &d->qdev,
+ d->unit ? "/disk@1" : "/disk@0");
+ }
+out:
+ error_propagate(errp, local_err);
+}
+
+static void ide_dev_instance_init(Object *obj)
+{
+ object_property_add(obj, "bootindex", "int32",
+ ide_dev_get_bootindex,
+ ide_dev_set_bootindex, NULL, NULL);
+ object_property_set_int(obj, "bootindex", -1, NULL);
+}
+
+static void ide_hd_realize(IDEDevice *dev, Error **errp)
+{
+ ide_dev_initfn(dev, IDE_HD, errp);
+}
+
+static void ide_cd_realize(IDEDevice *dev, Error **errp)
+{
+ ide_dev_initfn(dev, IDE_CD, errp);
+}
+
+#define DEFINE_IDE_DEV_PROPERTIES() \
+ DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \
+ DEFINE_BLOCK_ERROR_PROPERTIES(IDEDrive, dev.conf), \
+ DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \
+ DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \
+ DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\
+ DEFINE_PROP_STRING("model", IDEDrive, dev.model)
+
+static Property ide_hd_properties[] = {
+ DEFINE_IDE_DEV_PROPERTIES(),
+ DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf),
+ DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans",
+ IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO),
+ DEFINE_PROP_UINT16("rotation_rate", IDEDrive, dev.rotation_rate, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ide_hd_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ IDEDeviceClass *k = IDE_DEVICE_CLASS(klass);
+
+ k->realize = ide_hd_realize;
+ dc->fw_name = "drive";
+ dc->desc = "virtual IDE disk";
+ device_class_set_props(dc, ide_hd_properties);
+}
+
+static const TypeInfo ide_hd_info = {
+ .name = "ide-hd",
+ .parent = TYPE_IDE_DEVICE,
+ .instance_size = sizeof(IDEDrive),
+ .class_init = ide_hd_class_init,
+};
+
+static Property ide_cd_properties[] = {
+ DEFINE_IDE_DEV_PROPERTIES(),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ide_cd_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ IDEDeviceClass *k = IDE_DEVICE_CLASS(klass);
+
+ k->realize = ide_cd_realize;
+ dc->fw_name = "drive";
+ dc->desc = "virtual IDE CD-ROM";
+ device_class_set_props(dc, ide_cd_properties);
+}
+
+static const TypeInfo ide_cd_info = {
+ .name = "ide-cd",
+ .parent = TYPE_IDE_DEVICE,
+ .instance_size = sizeof(IDEDrive),
+ .class_init = ide_cd_class_init,
+};
+
+static void ide_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *k = DEVICE_CLASS(klass);
+ k->realize = ide_qdev_realize;
+ set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
+ k->bus_type = TYPE_IDE_BUS;
+ device_class_set_props(k, ide_props);
+}
+
+static const TypeInfo ide_device_type_info = {
+ .name = TYPE_IDE_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(IDEDevice),
+ .abstract = true,
+ .class_size = sizeof(IDEDeviceClass),
+ .class_init = ide_device_class_init,
+ .instance_init = ide_dev_instance_init,
+};
+
+static void ide_register_types(void)
+{
+ type_register_static(&ide_bus_info);
+ type_register_static(&ide_hd_info);
+ type_register_static(&ide_cd_info);
+ type_register_static(&ide_device_type_info);
+}
+
+type_init(ide_register_types)
diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c
new file mode 100644
index 000000000..46204f10d
--- /dev/null
+++ b/hw/ide/sii3112.c
@@ -0,0 +1,322 @@
+/*
+ * QEMU SiI3112A PCI to Serial ATA Controller Emulation
+ *
+ * Copyright (C) 2017 BALATON Zoltan <balaton@eik.bme.hu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* For documentation on this and similar cards see:
+ * http://wiki.osdev.org/User:Quok/Silicon_Image_Datasheets
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ide/pci.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "qom/object.h"
+
+#define TYPE_SII3112_PCI "sii3112"
+OBJECT_DECLARE_SIMPLE_TYPE(SiI3112PCIState, SII3112_PCI)
+
+typedef struct SiI3112Regs {
+ uint32_t confstat;
+ uint32_t scontrol;
+ uint16_t sien;
+ uint8_t swdata;
+} SiI3112Regs;
+
+struct SiI3112PCIState {
+ PCIIDEState i;
+ MemoryRegion mmio;
+ SiI3112Regs regs[2];
+};
+
+/* The sii3112_reg_read and sii3112_reg_write functions implement the
+ * Internal Register Space - BAR5 (section 6.7 of the data sheet).
+ */
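+/*
+ * Rough map of the offsets decoded below (relative to BAR5):
+ *   0x00-0x0f    BMDMA registers of both channels (aliased by BAR4)
+ *   0x10, 0x18   combined BMDMA/SATA interrupt status views
+ *   0x80-0x8a    channel 0 task file and control (aliased by BAR0/BAR1)
+ *   0xc0-0xca    channel 1 task file and control (aliased by BAR2/BAR3)
+ *   0xa0, 0xe0   per-channel configuration/interrupt status
+ *   0x100/0x180  SControl, 0x104/0x184 link status, 0x148/0x1c8 SIEN
+ */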
+
+static uint64_t sii3112_reg_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ SiI3112PCIState *d = opaque;
+ uint64_t val;
+
+ switch (addr) {
+ case 0x00:
+ val = d->i.bmdma[0].cmd;
+ break;
+ case 0x01:
+ val = d->regs[0].swdata;
+ break;
+ case 0x02:
+ val = d->i.bmdma[0].status;
+ break;
+ case 0x03:
+ val = 0;
+ break;
+ case 0x04 ... 0x07:
+ val = bmdma_addr_ioport_ops.read(&d->i.bmdma[0], addr - 4, size);
+ break;
+ case 0x08:
+ val = d->i.bmdma[1].cmd;
+ break;
+ case 0x09:
+ val = d->regs[1].swdata;
+ break;
+ case 0x0a:
+ val = d->i.bmdma[1].status;
+ break;
+ case 0x0b:
+ val = 0;
+ break;
+ case 0x0c ... 0x0f:
+ val = bmdma_addr_ioport_ops.read(&d->i.bmdma[1], addr - 12, size);
+ break;
+ case 0x10:
+ val = d->i.bmdma[0].cmd;
+ val |= (d->regs[0].confstat & (1UL << 11) ? (1 << 4) : 0); /*SATAINT0*/
+ val |= (d->regs[1].confstat & (1UL << 11) ? (1 << 6) : 0); /*SATAINT1*/
+ val |= (d->i.bmdma[1].status & BM_STATUS_INT ? (1 << 14) : 0);
+ val |= (uint32_t)d->i.bmdma[0].status << 16;
+ val |= (uint32_t)d->i.bmdma[1].status << 24;
+ break;
+ case 0x18:
+ val = d->i.bmdma[1].cmd;
+ val |= (d->regs[1].confstat & (1UL << 11) ? (1 << 4) : 0);
+ val |= (uint32_t)d->i.bmdma[1].status << 16;
+ break;
+ case 0x80 ... 0x87:
+ val = pci_ide_data_le_ops.read(&d->i.bus[0], addr - 0x80, size);
+ break;
+ case 0x8a:
+ val = pci_ide_cmd_le_ops.read(&d->i.bus[0], 2, size);
+ break;
+ case 0xa0:
+ val = d->regs[0].confstat;
+ break;
+ case 0xc0 ... 0xc7:
+ val = pci_ide_data_le_ops.read(&d->i.bus[1], addr - 0xc0, size);
+ break;
+ case 0xca:
+ val = pci_ide_cmd_le_ops.read(&d->i.bus[1], 2, size);
+ break;
+ case 0xe0:
+ val = d->regs[1].confstat;
+ break;
+ case 0x100:
+ val = d->regs[0].scontrol;
+ break;
+ case 0x104:
+ val = (d->i.bus[0].ifs[0].blk) ? 0x113 : 0;
+ break;
+ case 0x148:
+ val = (uint32_t)d->regs[0].sien << 16;
+ break;
+ case 0x180:
+ val = d->regs[1].scontrol;
+ break;
+ case 0x184:
+ val = (d->i.bus[1].ifs[0].blk) ? 0x113 : 0;
+ break;
+ case 0x1c8:
+ val = (uint32_t)d->regs[1].sien << 16;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+ trace_sii3112_read(size, addr, val);
+ return val;
+}
+
+static void sii3112_reg_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned int size)
+{
+ SiI3112PCIState *d = opaque;
+
+ trace_sii3112_write(size, addr, val);
+ switch (addr) {
+ case 0x00:
+ case 0x10:
+ bmdma_cmd_writeb(&d->i.bmdma[0], val);
+ break;
+ case 0x01:
+ case 0x11:
+ d->regs[0].swdata = val & 0x3f;
+ break;
+ case 0x02:
+ case 0x12:
+ d->i.bmdma[0].status = (val & 0x60) | (d->i.bmdma[0].status & 1) |
+ (d->i.bmdma[0].status & ~val & 6);
+ break;
+ case 0x04 ... 0x07:
+ bmdma_addr_ioport_ops.write(&d->i.bmdma[0], addr - 4, val, size);
+ break;
+ case 0x08:
+ case 0x18:
+ bmdma_cmd_writeb(&d->i.bmdma[1], val);
+ break;
+ case 0x09:
+ case 0x19:
+ d->regs[1].swdata = val & 0x3f;
+ break;
+ case 0x0a:
+ case 0x1a:
+ d->i.bmdma[1].status = (val & 0x60) | (d->i.bmdma[1].status & 1) |
+ (d->i.bmdma[1].status & ~val & 6);
+ break;
+ case 0x0c ... 0x0f:
+ bmdma_addr_ioport_ops.write(&d->i.bmdma[1], addr - 12, val, size);
+ break;
+ case 0x80 ... 0x87:
+ pci_ide_data_le_ops.write(&d->i.bus[0], addr - 0x80, val, size);
+ break;
+ case 0x8a:
+ pci_ide_cmd_le_ops.write(&d->i.bus[0], 2, val, size);
+ break;
+ case 0xc0 ... 0xc7:
+ pci_ide_data_le_ops.write(&d->i.bus[1], addr - 0xc0, val, size);
+ break;
+ case 0xca:
+ pci_ide_cmd_le_ops.write(&d->i.bus[1], 2, val, size);
+ break;
+ case 0x100:
+ d->regs[0].scontrol = val & 0xfff;
+ if (val & 1) {
+ ide_bus_reset(&d->i.bus[0]);
+ }
+ break;
+ case 0x148:
+ d->regs[0].sien = (val >> 16) & 0x3eed;
+ break;
+ case 0x180:
+ d->regs[1].scontrol = val & 0xfff;
+ if (val & 1) {
+ ide_bus_reset(&d->i.bus[1]);
+ }
+ break;
+ case 0x1c8:
+ d->regs[1].sien = (val >> 16) & 0x3eed;
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps sii3112_reg_ops = {
+ .read = sii3112_reg_read,
+ .write = sii3112_reg_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+/* the PCI irq level is the logical OR of the two channels */
+static void sii3112_update_irq(SiI3112PCIState *s)
+{
+ int i, set = 0;
+
+ for (i = 0; i < 2; i++) {
+ set |= s->regs[i].confstat & (1UL << 11);
+ }
+ pci_set_irq(PCI_DEVICE(s), (set ? 1 : 0));
+}
+
+static void sii3112_set_irq(void *opaque, int channel, int level)
+{
+ SiI3112PCIState *s = opaque;
+
+ trace_sii3112_set_irq(channel, level);
+ if (level) {
+ s->regs[channel].confstat |= (1UL << 11);
+ } else {
+ s->regs[channel].confstat &= ~(1UL << 11);
+ }
+
+ sii3112_update_irq(s);
+}
+
+static void sii3112_reset(DeviceState *dev)
+{
+ SiI3112PCIState *s = SII3112_PCI(dev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ s->regs[i].confstat = 0x6515 << 16;
+ ide_bus_reset(&s->i.bus[i]);
+ }
+}
+
+static void sii3112_pci_realize(PCIDevice *dev, Error **errp)
+{
+ SiI3112PCIState *d = SII3112_PCI(dev);
+ PCIIDEState *s = PCI_IDE(dev);
+ DeviceState *ds = DEVICE(dev);
+ MemoryRegion *mr;
+ int i;
+
+ pci_config_set_interrupt_pin(dev->config, 1);
+ pci_set_byte(dev->config + PCI_CACHE_LINE_SIZE, 8);
+
+ /* BAR5 is in PCI memory space */
+ memory_region_init_io(&d->mmio, OBJECT(d), &sii3112_reg_ops, d,
+ "sii3112.bar5", 0x200);
+ pci_register_bar(dev, 5, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
+
+ /* BAR0-BAR4 are PCI I/O space aliases into BAR5 */
+ mr = g_new(MemoryRegion, 1);
+ memory_region_init_alias(mr, OBJECT(d), "sii3112.bar0", &d->mmio, 0x80, 8);
+ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, mr);
+ mr = g_new(MemoryRegion, 1);
+ memory_region_init_alias(mr, OBJECT(d), "sii3112.bar1", &d->mmio, 0x88, 4);
+ pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, mr);
+ mr = g_new(MemoryRegion, 1);
+ memory_region_init_alias(mr, OBJECT(d), "sii3112.bar2", &d->mmio, 0xc0, 8);
+ pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, mr);
+ mr = g_new(MemoryRegion, 1);
+ memory_region_init_alias(mr, OBJECT(d), "sii3112.bar3", &d->mmio, 0xc8, 4);
+ pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, mr);
+ mr = g_new(MemoryRegion, 1);
+ memory_region_init_alias(mr, OBJECT(d), "sii3112.bar4", &d->mmio, 0, 16);
+ pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, mr);
+
+ qdev_init_gpio_in(ds, sii3112_set_irq, 2);
+ for (i = 0; i < 2; i++) {
+ ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1);
+ ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i));
+
+ bmdma_init(&s->bus[i], &s->bmdma[i], s);
+ s->bmdma[i].bus = &s->bus[i];
+ ide_register_restart_cb(&s->bus[i]);
+ }
+}
+
+static void sii3112_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pd = PCI_DEVICE_CLASS(klass);
+
+ pd->vendor_id = 0x1095;
+ pd->device_id = 0x3112;
+ pd->class_id = PCI_CLASS_STORAGE_RAID;
+ pd->revision = 1;
+ pd->realize = sii3112_pci_realize;
+ dc->reset = sii3112_reset;
+ dc->desc = "SiI3112A SATA controller";
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo sii3112_pci_info = {
+ .name = TYPE_SII3112_PCI,
+ .parent = TYPE_PCI_IDE,
+ .instance_size = sizeof(SiI3112PCIState),
+ .class_init = sii3112_pci_class_init,
+};
+
+static void sii3112_register_types(void)
+{
+ type_register_static(&sii3112_pci_info);
+}
+
+type_init(sii3112_register_types)
diff --git a/hw/ide/trace-events b/hw/ide/trace-events
new file mode 100644
index 000000000..15d7921f1
--- /dev/null
+++ b/hw/ide/trace-events
@@ -0,0 +1,124 @@
+# See docs/devel/tracing.rst for syntax documentation.
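+# Individual trace points listed below can typically be enabled at run
+# time, e.g. with "-trace 'bmdma_*'" on the QEMU command line (see the
+# tracing documentation referenced above for the exact syntax).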
+
+# core.c
+# portio
+ide_ioport_read(uint32_t addr, const char *reg, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (%s); val 0x%02"PRIx32"; bus %p IDEState %p"
+ide_ioport_write(uint32_t addr, const char *reg, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (%s); val 0x%02"PRIx32"; bus %p IDEState %p"
+ide_status_read(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Alt Status); val 0x%02"PRIx32"; bus %p; IDEState %p"
+ide_ctrl_write(uint32_t addr, uint32_t val, void *bus) "IDE PIO wr @ 0x%"PRIx32" (Device Control); val 0x%02"PRIx32"; bus %p"
+# Warning: verbose
+ide_data_readw(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Data: Word); val 0x%04"PRIx32"; bus %p; IDEState %p"
+ide_data_writew(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (Data: Word); val 0x%04"PRIx32"; bus %p; IDEState %p"
+ide_data_readl(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p"
+ide_data_writel(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p"
+# misc
+ide_exec_cmd(void *bus, void *state, uint32_t cmd) "IDE exec cmd: bus %p; state %p; cmd 0x%02x"
+ide_cancel_dma_sync_buffered(void *fn, void *req) "invoking cb %p of buffered request %p with -ECANCELED"
+ide_cancel_dma_sync_remaining(void) "draining all remaining requests"
+ide_sector_read(int64_t sector_num, int nsectors) "sector=%"PRId64" nsectors=%d"
+ide_sector_write(int64_t sector_num, int nsectors) "sector=%"PRId64" nsectors=%d"
+ide_reset(void *s) "IDEstate %p"
+ide_bus_reset_aio(void) "aio_cancel"
+ide_dma_cb(void *s, int64_t sector_num, int n, const char *dma) "IDEState %p; sector_num=%"PRId64" n=%d cmd=%s"
+
+# BMDMA HBAs:
+
+# cmd646.c
+bmdma_read_cmd646(uint64_t addr, uint32_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x"
+bmdma_write_cmd646(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64
+
+# pci.c
+bmdma_reset(void) ""
+bmdma_cmd_writeb(uint32_t val) "val: 0x%08x"
+bmdma_addr_read(uint64_t data) "data: 0x%016"PRIx64
+bmdma_addr_write(uint64_t data) "data: 0x%016"PRIx64
+
+# piix.c
+bmdma_read(uint64_t addr, uint8_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x"
+bmdma_write(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64
+
+# sii3112.c
+sii3112_read(int size, uint64_t addr, uint64_t val) "bmdma: read (size %d) 0x%"PRIx64" : 0x%02"PRIx64
+sii3112_write(int size, uint64_t addr, uint64_t val) "bmdma: write (size %d) 0x%"PRIx64" : 0x%02"PRIx64
+sii3112_set_irq(int channel, int level) "channel %d level %d"
+
+# via.c
+bmdma_read_via(uint64_t addr, uint32_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x"
+bmdma_write_via(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64
+
+# atapi.c
+cd_read_sector_sync(int lba) "lba=%d"
+cd_read_sector_cb(int lba, int ret) "lba=%d ret=%d"
+cd_read_sector(int lba) "lba=%d"
+ide_atapi_cmd_error(void *s, int sense_key, int asc) "IDEState: %p; sense=0x%x asc=0x%x"
+ide_atapi_cmd_reply_end(void *s, int tx_size, int elem_tx_size, int32_t index) "IDEState %p; reply: tx_size=%d elem_tx_size=%d index=%"PRId32
+ide_atapi_cmd_reply_end_eot(void *s, int status) "IDEState: %p; end of transfer, status=0x%x"
+ide_atapi_cmd_reply_end_bcl(void *s, int bcl) "IDEState: %p; byte_count_limit=%d"
+ide_atapi_cmd_reply_end_new(void *s, int status) "IDEState: %p; new transfer started, status=0x%x"
+ide_atapi_cmd_check_status(void *s) "IDEState: %p"
+ide_atapi_cmd_read(void *s, const char *method, int lba, int nb_sectors) "IDEState: %p; read %s: LBA=%d nb_sectors=%d"
+ide_atapi_cmd(void *s, uint8_t cmd) "IDEState: %p; cmd: 0x%02x"
+ide_atapi_cmd_read_dma_cb_aio(void *s, int lba, int n) "IDEState: %p; aio read: lba=%d n=%d"
+# Warning: Verbose
+ide_atapi_cmd_packet(void *s, uint16_t limit, const char *packet) "IDEState: %p; limit=0x%x packet: %s"
+
+# ahci.c
+ahci_port_read(void *s, int port, const char *reg, int offset, uint32_t ret) "ahci(%p)[%d]: port read [reg:%s] @ 0x%x: 0x%08x"
+ahci_port_read_default(void *s, int port, const char *reg, int offset) "ahci(%p)[%d]: unimplemented port read [reg:%s] @ 0x%x"
+ahci_irq_raise(void *s) "ahci(%p): raise irq"
+ahci_irq_lower(void *s) "ahci(%p): lower irq"
+ahci_check_irq(void *s, uint32_t old, uint32_t new) "ahci(%p): check irq 0x%08x --> 0x%08x"
+ahci_trigger_irq(void *s, int port, const char *name, uint32_t val, uint32_t old, uint32_t new, uint32_t effective) "ahci(%p)[%d]: trigger irq +%s (0x%08x); irqstat: 0x%08x --> 0x%08x; effective: 0x%08x"
+ahci_port_write(void *s, int port, const char *reg, int offset, uint32_t val) "ahci(%p)[%d]: port write [reg:%s] @ 0x%x: 0x%08x"
+ahci_port_write_unimpl(void *s, int port, const char *reg, int offset, uint32_t val) "ahci(%p)[%d]: unimplemented port write [reg:%s] @ 0x%x: 0x%08x"
+ahci_mem_read_32(void *s, uint64_t addr, uint32_t val) "ahci(%p): mem read @ 0x%"PRIx64": 0x%08x"
+ahci_mem_read_32_default(void *s, uint64_t addr, uint32_t val) "ahci(%p): mem read @ 0x%"PRIx64": 0x%08x"
+ahci_mem_read_32_host(void *s, const char *reg, uint64_t addr, uint32_t val) "ahci(%p): mem read [reg:%s] @ 0x%"PRIx64": 0x%08x"
+ahci_mem_read_32_host_default(void *s, const char *reg, uint64_t addr) "ahci(%p): unimplemented mem read [reg:%s] @ 0x%"PRIx64
+ahci_mem_read(void *s, unsigned size, uint64_t addr, uint64_t val) "ahci(%p): read%u @ 0x%"PRIx64": 0x%016"PRIx64
+ahci_mem_write(void *s, unsigned size, uint64_t addr, uint64_t val) "ahci(%p): write%u @ 0x%"PRIx64": 0x%016"PRIx64
+ahci_mem_write_host_unimpl(void *s, unsigned size, const char *reg, uint64_t addr) "ahci(%p) unimplemented write%u [reg:%s] @ 0x%"PRIx64
+ahci_mem_write_host(void *s, unsigned size, const char *reg, uint64_t addr, uint64_t val) "ahci(%p) write%u [reg:%s] @ 0x%"PRIx64": 0x%016"PRIx64
+ahci_mem_write_unimpl(void *s, unsigned size, uint64_t addr, uint64_t val) "ahci(%p): write%u to unknown register 0x%"PRIx64": 0x%016"PRIx64
+ahci_set_signature(void *s, int port, uint8_t nsector, uint8_t sector, uint8_t lcyl, uint8_t hcyl, uint32_t sig) "ahci(%p)[%d]: set signature sector:0x%02x nsector:0x%02x lcyl:0x%02x hcyl:0x%02x (cumulatively: 0x%08x)"
+ahci_reset_port(void *s, int port) "ahci(%p)[%d]: reset port"
+ahci_unmap_fis_address_null(void *s, int port) "ahci(%p)[%d]: Attempt to unmap NULL FIS address"
+ahci_unmap_clb_address_null(void *s, int port) "ahci(%p)[%d]: Attempt to unmap NULL CLB address"
+ahci_populate_sglist(void *s, int port) "ahci(%p)[%d]"
+ahci_populate_sglist_no_prdtl(void *s, int port, uint16_t opts) "ahci(%p)[%d]: no sg list given by guest: 0x%04x"
+ahci_populate_sglist_no_map(void *s, int port) "ahci(%p)[%d]: DMA mapping failed"
+ahci_populate_sglist_short_map(void *s, int port) "ahci(%p)[%d]: mapped less than expected"
+ahci_populate_sglist_bad_offset(void *s, int port, int off_idx, int64_t off_pos) "ahci(%p)[%d]: Incorrect offset! off_idx: %d, off_pos: %"PRId64
+ncq_finish(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: NCQ transfer finished"
+execute_ncq_command_read(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ reading %d sectors from LBA %"PRId64
+execute_ncq_command_unsup(void *s, int port, uint8_t tag, uint8_t cmd) "ahci(%p)[%d][tag:%d]: error: unsupported NCQ command (0x%02x) received"
+process_ncq_command_mismatch(void *s, int port, uint8_t tag, uint8_t slot) "ahci(%p)[%d][tag:%d]: Warning: NCQ slot (%d) did not match the given tag"
+process_ncq_command_aux(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Attempt to use NCQ auxiliary fields"
+process_ncq_command_prioicc(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Unsupported attempt to use PRIO/ICC fields"
+process_ncq_command_fua(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Unsupported attempt to use Force Unit Access"
+process_ncq_command_rarc(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Unsupported attempt to use Rebuild Assist"
+process_ncq_command_large(void *s, int port, uint8_t tag, size_t prdtl, size_t size) "ahci(%p)[%d][tag:%d]: Warn: PRDTL (0x%zx) does not match requested size (0x%zx)"
+process_ncq_command(void *s, int port, uint8_t tag, uint8_t cmd, uint64_t lba, uint64_t end) "ahci(%p)[%d][tag:%d]: NCQ op 0x%02x on sectors [%"PRId64",%"PRId64"]"
+handle_reg_h2d_fis_pmp(void *s, int port, char b0, char b1, char b2) "ahci(%p)[%d]: Port Multiplier not supported, FIS: 0x%02x-%02x-%02x"
+handle_reg_h2d_fis_res(void *s, int port, char b0, char b1, char b2) "ahci(%p)[%d]: Reserved flags set in H2D Register FIS, FIS: 0x%02x-%02x-%02x"
+handle_cmd_busy(void *s, int port) "ahci(%p)[%d]: engine busy"
+handle_cmd_nolist(void *s, int port) "ahci(%p)[%d]: handle_cmd called without s->dev[port].lst"
+handle_cmd_badport(void *s, int port) "ahci(%p)[%d]: guest accessed unused port"
+handle_cmd_badfis(void *s, int port) "ahci(%p)[%d]: guest provided an invalid cmd FIS"
+handle_cmd_badmap(void *s, int port, uint64_t len) "ahci(%p)[%d]: dma_memory_map failed, 0x%02"PRIx64" != 0x80"
+handle_cmd_unhandled_fis(void *s, int port, uint8_t b0, uint8_t b1, uint8_t b2) "ahci(%p)[%d]: unhandled FIS type. cmd_fis: 0x%02x-%02x-%02x"
+ahci_pio_transfer(void *s, int port, const char *rw, uint32_t size, const char *tgt, const char *sgl) "ahci(%p)[%d]: %sing %d bytes on %s w/%s sglist"
+ahci_start_dma(void *s, int port) "ahci(%p)[%d]: start dma"
+ahci_dma_prepare_buf(void *s, int port, int32_t io_buffer_size, int32_t limit) "ahci(%p)[%d]: prepare buf limit=%"PRId32" prepared=%"PRId32
+ahci_dma_prepare_buf_fail(void *s, int port) "ahci(%p)[%d]: sglist population failed"
+ahci_dma_rw_buf(void *s, int port, int l) "ahci(%p)[%d] len=0x%x"
+ahci_cmd_done(void *s, int port) "ahci(%p)[%d]: cmd done"
+ahci_reset(void *s) "ahci(%p): HBA reset"
+
+# Warning: Verbose
+handle_reg_h2d_fis_dump(void *s, int port, const char *fis) "ahci(%p)[%d]: %s"
+handle_cmd_fis_dump(void *s, int port, const char *fis) "ahci(%p)[%d]: %s"
+
+# ahci-allwinner.c
+allwinner_ahci_mem_read(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): read a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d"
+allwinner_ahci_mem_write(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): write a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d"
diff --git a/hw/ide/trace.h b/hw/ide/trace.h
new file mode 100644
index 000000000..e060e0aef
--- /dev/null
+++ b/hw/ide/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_ide.h"
diff --git a/hw/ide/via.c b/hw/ide/via.c
new file mode 100644
index 000000000..82def819c
--- /dev/null
+++ b/hw/ide/via.c
@@ -0,0 +1,243 @@
+/*
+ * QEMU IDE Emulation: PCI VIA82C686B support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Copyright (c) 2010 Huacai Chen <zltjiangshi@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+#include "hw/isa/vt82c686.h"
+#include "hw/ide/pci.h"
+#include "trace.h"
+
+static uint64_t bmdma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ BMDMAState *bm = opaque;
+ uint32_t val;
+
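+    /* Only byte accesses are decoded; wider reads return all ones. */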
+ if (size != 1) {
+ return ((uint64_t)1 << (size * 8)) - 1;
+ }
+
+ switch (addr & 3) {
+ case 0:
+ val = bm->cmd;
+ break;
+ case 2:
+ val = bm->status;
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ trace_bmdma_read_via(addr, val);
+ return val;
+}
+
+static void bmdma_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ BMDMAState *bm = opaque;
+
+ if (size != 1) {
+ return;
+ }
+
+ trace_bmdma_write_via(addr, val);
+ switch (addr & 3) {
+ case 0:
+ bmdma_cmd_writeb(bm, val);
+ break;
+ case 2:
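+        /* Bits 5-6 are writable, bit 0 (active) is read-only, and bits 1-2
+         * (error, interrupt) are write-one-to-clear. */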
+ bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ break;
+ default:;
+ }
+}
+
+static const MemoryRegionOps via_bmdma_ops = {
+ .read = bmdma_read,
+ .write = bmdma_write,
+};
+
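+/*
+ * BAR4 is a 16-byte I/O region holding one 8-byte block per channel: the
+ * BMDMA command/status registers at offset 0 and the PRD table address
+ * port at offset 4.
+ */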
+static void bmdma_setup_bar(PCIIDEState *d)
+{
+ int i;
+
+ memory_region_init(&d->bmdma_bar, OBJECT(d), "via-bmdma-container", 16);
+    for (i = 0; i < 2; i++) {
+ BMDMAState *bm = &d->bmdma[i];
+
+ memory_region_init_io(&bm->extra_io, OBJECT(d), &via_bmdma_ops, bm,
+ "via-bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io);
+ memory_region_init_io(&bm->addr_ioport, OBJECT(d),
+ &bmdma_addr_ioport_ops, bm, "bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport);
+ }
+}
+
+static void via_ide_set_irq(void *opaque, int n, int level)
+{
+ PCIDevice *d = PCI_DEVICE(opaque);
+
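+    /* Latch the level in what appears to be the per-channel interrupt status
+     * bit of the VIA config space (0x70 primary, 0x78 secondary), then
+     * forward it to the southbridge as ISA IRQ 14/15. */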
+ if (level) {
+ d->config[0x70 + n * 8] |= 0x80;
+ } else {
+ d->config[0x70 + n * 8] &= ~0x80;
+ }
+
+ via_isa_set_irq(pci_get_function_0(d), 14 + n, level);
+}
+
+static void via_ide_reset(DeviceState *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ PCIDevice *pd = PCI_DEVICE(dev);
+ uint8_t *pci_conf = pd->config;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_reset(&d->bus[i]);
+ }
+
+ pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_WAIT);
+ pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MEDIUM);
+
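+    /* Reset the BARs to the legacy ISA-compatible port ranges. */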
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_0, 0x000001f0);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_1, 0x000003f4);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_2, 0x00000170);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_3, 0x00000374);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_4, 0x0000cc01); /* BMIBA: 20-23h */
+ pci_set_long(pci_conf + PCI_INTERRUPT_LINE, 0x0000010e);
+
+    /* IDE chip enable, IDE configuration 1/2, IDE FIFO Configuration */
+ pci_set_long(pci_conf + 0x40, 0x0a090600);
+ /* IDE misc configuration 1/2/3 */
+ pci_set_long(pci_conf + 0x44, 0x00c00068);
+ /* IDE Timing control */
+ pci_set_long(pci_conf + 0x48, 0xa8a8a8a8);
+ /* IDE Address Setup Time */
+ pci_set_long(pci_conf + 0x4c, 0x000000ff);
+    /* UltraDMA Extended Timing Control */
+ pci_set_long(pci_conf + 0x50, 0x07070707);
+ /* UltraDMA FIFO Control */
+ pci_set_long(pci_conf + 0x54, 0x00000004);
+ /* IDE primary sector size */
+ pci_set_long(pci_conf + 0x60, 0x00000200);
+ /* IDE secondary sector size */
+ pci_set_long(pci_conf + 0x68, 0x00000200);
+ /* PCI PM Block */
+ pci_set_long(pci_conf + 0xc0, 0x00020001);
+}
+
+static void via_ide_realize(PCIDevice *dev, Error **errp)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ DeviceState *ds = DEVICE(dev);
+ uint8_t *pci_conf = dev->config;
+ int i;
+
+ pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy mode */
+ pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0);
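+    /* The interrupt line is not guest-writable; only the two per-channel
+     * native-mode bits of the programming interface (mask 0x05) are. */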
+ dev->wmask[PCI_INTERRUPT_LINE] = 0;
+ dev->wmask[PCI_CLASS_PROG] = 5;
+
+ memory_region_init_io(&d->data_bar[0], OBJECT(d), &pci_ide_data_le_ops,
+ &d->bus[0], "via-ide0-data", 8);
+ pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[0]);
+
+ memory_region_init_io(&d->cmd_bar[0], OBJECT(d), &pci_ide_cmd_le_ops,
+ &d->bus[0], "via-ide0-cmd", 4);
+ pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[0]);
+
+ memory_region_init_io(&d->data_bar[1], OBJECT(d), &pci_ide_data_le_ops,
+ &d->bus[1], "via-ide1-data", 8);
+ pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[1]);
+
+ memory_region_init_io(&d->cmd_bar[1], OBJECT(d), &pci_ide_cmd_le_ops,
+ &d->bus[1], "via-ide1-cmd", 4);
+ pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[1]);
+
+ bmdma_setup_bar(d);
+ pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);
+
+ qdev_init_gpio_in(ds, via_ide_set_irq, 2);
+ for (i = 0; i < 2; i++) {
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
+ ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i));
+
+ bmdma_init(&d->bus[i], &d->bmdma[i], d);
+ d->bmdma[i].bus = &d->bus[i];
+ ide_register_restart_cb(&d->bus[i]);
+ }
+}
+
+static void via_ide_exitfn(PCIDevice *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ unsigned i;
+
+ for (i = 0; i < 2; ++i) {
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io);
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport);
+ }
+}
+
+static void via_ide_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = via_ide_reset;
+ dc->vmsd = &vmstate_ide_pci;
+ /* Reason: only works as function of VIA southbridge */
+ dc->user_creatable = false;
+
+ k->realize = via_ide_realize;
+ k->exit = via_ide_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_VIA;
+ k->device_id = PCI_DEVICE_ID_VIA_IDE;
+ k->revision = 0x06;
+ k->class_id = PCI_CLASS_STORAGE_IDE;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo via_ide_info = {
+ .name = "via-ide",
+ .parent = TYPE_PCI_IDE,
+ .class_init = via_ide_class_init,
+};
+
+static void via_ide_register_types(void)
+{
+ type_register_static(&via_ide_info);
+}
+
+type_init(via_ide_register_types)